index.d.ts
declare module "*.gif";
declare module "*.png";
ChessTest.ts
/// <reference path="../node_modules/@types/mocha/index.d.ts" />
import * as assert from 'assert';
import {Board, Player, King, Rook, Cell, GameStatus, Queen, Pawn, Move, PieceType} from '../src/Chess';
describe('ChessBoard', () => {
it('checkmate', () => {
let board = new Board();
board.player = Player.Black;
board.add(new King(Player.White, new Cell(7, 7)));
board.add(new Rook(Player.White, new Cell(0, 0)));
board.add(new Rook(Player.White, new Cell(1, 1)));
board.add(new King(Player.Black, new Cell(7, 0)));
board.analyze();
assert.equal(GameStatus.Checkmate, board.status);
});
it('check', () => {
let board = new Board();
board.player = Player.Black;
board.add(new King(Player.Black, new Cell(0, 0)));
board.add(new Queen(Player.White, new Cell(1, 1)));
board.add(new King(Player.White, new Cell(7, 7)));
let res = board.analyze();
assert.equal(res, null);
let king = board.getKing(Player.Black);
assert.equal(1, king.moves.length);
assert.equal(Cell.ID(1, 1), king.moves[0].id());
});
it('promote', () => {
let board = new Board();
board.add(new King(Player.Black, new Cell(0, 0)));
board.add(new King(Player.White, new Cell(7, 7)));
board.add(new Pawn(Player.White, new Cell(3, 1)));
board.makeMove(new Move(new Cell(3, 1), new Cell(3, 0)));
let p = board.get(3, 0);
assert.equal(PieceType.Queen, p.type);
assert.equal(Player.White, p.player);
board.undo();
assert.equal(null, board.get(3, 0));
let p2 = board.get(3, 1);
assert.equal(PieceType.Pawn, p2.type);
assert.equal(Player.White, p2.player);
});
it('promote2', () => {
let board = new Board();
board.add(new King(Player.Black, new Cell(0, 0)));
board.add(new Rook(Player.Black, new Cell(7, 0)));
board.add(new King(Player.White, new Cell(7, 7)));
board.add(new Pawn(Player.White, new Cell(6, 1)));
board.makeMove(new Move(new Cell(6, 1), new Cell(7, 0)));
let p = board.get(7, 0);
assert.equal(PieceType.Queen, p.type);
assert.equal(Player.White, p.player);
assert.equal(3, board.getPieces().length);
board.undo();
assert.equal(4, board.getPieces().length);
assert.equal(PieceType.Rook, board.get(7, 0).type);
let p2 = board.get(6, 1);
assert.equal(PieceType.Pawn, p2.type);
assert.equal(Player.White, p2.player);
});
});
tutorial.rs
extern crate env_logger;
extern crate gluon;
#[macro_use]
extern crate gluon_vm;
use gluon::base::types::Type;
use gluon::vm::api::{FunctionRef, Hole, OpaqueValue};
use gluon::vm;
use gluon::{RootedThread, Thread};
use gluon::import::{add_extern_module, Import};
use gluon::Compiler;
fn new_vm() -> RootedThread {
let vm = ::gluon::new_vm();
let import = vm.get_macros().get("import");
import
.as_ref()
.and_then(|import| import.downcast_ref::<Import>())
.expect("Import macro")
.add_path("..");
vm
}
#[test]
fn access_field_through_alias() {
let _ = ::env_logger::init();
let vm = new_vm();
Compiler::new()
.run_expr_async::<OpaqueValue<&Thread, Hole>>(&vm, "example", r#" import! std.int "#)
.sync_or_error()
.unwrap();
let mut add: FunctionRef<fn(i32, i32) -> i32> = vm.get_global("std.int.num.(+)").unwrap();
let result = add.call(1, 2);
assert_eq!(result, Ok(3));
}
#[test]
fn call_rust_from_gluon() {
let _ = ::env_logger::init();
fn factorial(x: i32) -> i32 {
if x <= 1 {
1
} else {
x * factorial(x - 1)
}
}
fn load_factorial(vm: &Thread) -> vm::Result<vm::ExternModule> {
vm::ExternModule::new(vm, primitive!(1 factorial))
}
let vm = new_vm();
// Introduce a module that can be loaded with `import! factorial`
add_extern_module(&vm, "factorial", load_factorial);
let expr = r#"
let factorial = import! factorial
factorial 5
"#;
let (result, _) = Compiler::new()
.run_expr_async::<i32>(&vm, "factorial", expr)
.sync_or_error()
.unwrap();
assert_eq!(result, 120);
}
#[test]
fn use_string_module() {
let _ = ::env_logger::init();
let vm = new_vm();
let result = Compiler::new()
.run_expr_async::<String>(
&vm,
"example",
" let string = import! \"std/string.glu\" in string.trim \" \
Hello world \t\" ",
)
.sync_or_error()
.unwrap();
let expected = ("Hello world".to_string(), Type::string());
assert_eq!(result, expected);
}
time.rs
use std::time::{Instant, Duration};
pub struct DeltaTimer(Option<Instant>);
impl DeltaTimer{
pub fn new() -> DeltaTimer{
DeltaTimer(None)
}
/* Reset the timer */
pub fn reset(&mut self){ self.0 = None }
pub fn duration(&mut self) -> Duration{
let now = Instant::now();
let delta = now.duration_since(self.0.unwrap_or(now));
// Save the current time
self.0 = Some(now);
delta
}
pub fn delta_millis(&mut self) -> u64{
let duration = self.duration();
(duration.as_secs() * 1000) + (duration.subsec_nanos() as f64 / 1_000_000.0).floor() as u64
}
pub fn delta_nanos(&mut self) -> u64{
let duration = self.duration();
(duration.as_secs() * 1_000_000_000) + duration.subsec_nanos() as u64
}
pub fn delta_seconds(&mut self) -> u64{
self.delta().round() as u64
}
pub fn delta(&mut self) -> f64 {
let duration = self.duration();
(duration.as_secs() as f64) + (duration.subsec_nanos() as f64 / 1_000_000_000.0)
}
}
#[cfg(test)]
mod tests{
use super::DeltaTimer;
#[test]
fn delta_timer(){
// Setup logger
let _ = ::setup_logger();
let mut timer = DeltaTimer::new();
let _ = timer.duration();
use std::thread;
use std::time::Duration;
thread::sleep(Duration::from_millis(2000));
assert_eq!(timer.delta_seconds(), 2);
}
}
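// Illustrative usage sketch, not part of this module: a frame loop that feeds
// the per-iteration delta into an update step. `update_world` is hypothetical.
//
//     let mut timer = DeltaTimer::new();
//     loop {
//         let dt = timer.delta(); // seconds elapsed since the previous call
//         update_world(dt);       // hypothetical consumer of the delta
//     }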
options.go
package sns
import (
"fmt"
"strings"
"time"
"github.com/loopcontext/cloudevents-aws-transport/encoding"
)
// Option is the function signature required to be considered an SNS transport Option.
type Option func(*Transport) error
// WithEncoding sets the encoding for the SNS transport.
func WithEncoding(encoding encoding.Encoding) Option {
return func(t *Transport) error {
t.Encoding = encoding
return nil
}
}
// WithShutdownTimeout sets the shutdown timeout when the http server is being shut down.
func WithShutdownTimeout(timeout time.Duration) Option {
return func(t *Transport) error {
if t == nil {
return fmt.Errorf("http shutdown timeout option can not set nil transport")
}
t.ShutdownTimeout = &timeout
return nil
}
}
// WithPort sets the listening port for StartReceiver.
func WithPort(port int) Option {
return func(t *Transport) error {
if t == nil {
return fmt.Errorf("http port option can not set nil transport")
}
if port < 0 || port > 65535 {
return fmt.Errorf("http port option was given an invalid port: %d", port)
}
t.Port = &port
return nil
}
}
// WithPath sets the path to receive cloudevents on for SNS transports.
func WithPath(path string) Option {
return func(t *Transport) error {
if t == nil {
return fmt.Errorf("http path option can not set nil transport")
}
path = strings.TrimSpace(path)
if len(path) == 0 {
return fmt.Errorf("http path option was given an invalid path: %q", path)
}
t.Path = path
return nil
}
}
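// Illustrative usage sketch, not part of this file: options are applied by
// whatever constructor builds the Transport. `New` below is an assumed shape
// for that constructor, not an API defined in this package.
//
//	func New(opts ...Option) (*Transport, error) {
//		t := &Transport{}
//		for _, opt := range opts {
//			if err := opt(t); err != nil {
//				return nil, err
//			}
//		}
//		return t, nil
//	}
//
//	// t, err := New(WithPort(8080), WithPath("/events"), WithShutdownTimeout(5*time.Second))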
eyeparse.py | """
Classes to parse NDS files and create pandas DataFrames to hold eyetracking information.
"""
import numpy as np
import pandas as pd
class EyeDataParser(object):
def __init__(self, data_fpath):
self.data_fpath = data_fpath
# Make dictionaries to hold our data with repetitions as keys
self._task_data = {}
self._radial_target_data = {}
self._frame_data = [] # List of frame times and frame numbers
# Holds the DATA lines as a list of lists for the current section of the ascii file
self._current_data = []
self._task_rep = 0 # don't have this in the data files, so we keep track manually
self._frame_num = -1 # Keep track of the frame number we're on
self._current_eye_flags = {'L':'NONE', 'R':'NONE'} # Eye flags for the current line
# Name for assigning to an empty list when we want to throw away data
# that's been accumulated so far.
self._trash_data = []
# Keep track of radial target information from each line that belongs
# to a radial target
self._RT_dist = None
self._RT_rep = None
self._RT_eccentricity = None
self._RT_direction = None
self._in_preamble = True
@property
def task_data(self):
return self._task_data
@property
def radial_target_data(self):
return self._radial_target_data
@property
def frames(self):
return self._frame_data
@property
def in_preamble(self):
return self._in_preamble
def parse_data(self):
with open(self.data_fpath) as f:
for line in f:
splitted_line = line.split()
if not splitted_line:
continue
line_type = splitted_line[0]
if line_type == "START":
self._in_preamble = False
if self.in_preamble:
continue
# if line_type can convert to int, it's a DATA line
try:
int(line_type)
line_type = 'DATA'
except ValueError as e:
pass
# execute the appropriate parse function for this line type;
# if we don't have a handler for it, register a no-op one so the line is skipped
parse_func_name = '_parse_' + line_type
try:
getattr(self, parse_func_name)(splitted_line)
except AttributeError as e:
print "Passing unhandled line type", line_type
setattr(self, parse_func_name, self._parse_unknown_line)
getattr(self, parse_func_name)(splitted_line)
def _parse_DATA(self, data):
if data[-1] == '.....':
data_quality = 'GOOD'
else:
data_quality = 'BAD'
time = int(data[0])
eye_data = [np.nan if i=='.' else float(i) for i in data[1:7]]
full_data = [time] + eye_data + [self._current_eye_flags['L'], self._current_eye_flags['R']] + [data_quality]
RT_data = [self._RT_dist, self._RT_direction, self._RT_eccentricity]#, self._RT_rep]
if not any(i is None for i in RT_data): # if all RT data is valid
full_data = full_data + RT_data # extend the list with RT data
self._current_data.append(full_data)
def _parse_MSG(self, msg):
msg_type = msg[2]
if msg_type == 'RADIAL_TARGET_STARTED':
self._RT_rep = int(msg[4])
self._current_data = self._trash_data
elif msg_type == 'RADIAL_TARGET':
self._RT_dist = int(msg[3])
self._RT_eccentricity = int(msg[4])
self._RT_direction = int(msg[5])
if int(msg[6]): # Check if RADIAL_TARGET is starting
rt_data = self._radial_target_data
if not self._RT_rep in rt_data.keys():
rt_data[self._RT_rep] = []
self._current_data = rt_data[self._RT_rep] # Pointer to RT data repetition
else: # Clear current data if RADIAL_TARGET is ending.
self._current_data = self._trash_data
elif msg_type == 'TASK_STARTED':
self._task_rep += 1
self._RT_eccentricity = None
self._RT_direction = None
self._RT_dist = None
self._RT_rep = None
task_data = self._task_data
if not self._task_rep in task_data.keys():
task_data[self._task_rep] = []
self._current_data = task_data[self._task_rep] # Pointer to Task data repetition
elif msg_type == 'TASK_STOPPED':
self._current_data = self._trash_data
elif msg_type == 'RADIAL_TARGET_STOPPED':
self._current_data = self._trash_data
self._RT_eccentricity = None
self._RT_direction = None
self._RT_dist = None
self._RT_rep = None
def _parse_SSACC(self, sacc):
self._current_eye_flags[sacc[1]] = 'SACC'
def _parse_ESACC(self, sacc):
self._current_eye_flags[sacc[1]] = 'NONE'
def _parse_SFIX(self, fix):
self._current_eye_flags[fix[1]] = 'FIX'
def _parse_EFIX(self, fix):
self._current_eye_flags[fix[1]] = 'NONE'
def _parse_SBLINK(self, blink):
self._current_eye_flags[blink[1]] = 'BLINK'
def _parse_EBLINK(self, blink):
self._current_eye_flags[blink[1]] = 'NONE'
def _parse_BUTTON(self, button):
if int(button[3]):
self._frame_num += 1
time = int(button[1])
self._frame_data.append([time, self._frame_num])
def _parse_unknown_line(self, unk):
pass
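# For orientation, _parse_DATA above expects an already-split EyeLink-style ASC
# sample line; a representative (hypothetical) raw line would look like:
#   7852635  512.3  386.9  1052.0  517.1  390.2  1049.5  .....
# i.e. timestamp, left x/y/pupil, right x/y/pupil, and a trailing '.....'
# quality field marking a good sample (anything else is flagged 'BAD').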
class EyeDataFrameCreator(object):
def __init__(self, parser):
# NOTE These are currently publicly settable
self.task_df = self.create_task_df(parser.task_data)
self.radial_target_df = self.create_rt_df(parser.radial_target_data)
self.frame_df = self.create_frame_df(parser.frames)
def create_task_df(self, task_data):
col_arrays = [np.array(['left', 'left', 'left', 'right', 'right', 'right', 'left', 'right', 'both']),
np.array(['href x', 'href y', 'pupil area', 'href x', 'href y', 'pupil area', 'flag', 'flag', 'quality'])]
task_df = self._generate_dataframe(task_data, col_arrays)
return task_df
def create_rt_df(self, rt_data):
col_arrays = [np.array(['left', 'left', 'left', 'right', 'right', 'right', 'left', 'right', 'both', 'target', 'target', 'target']),
np.array(['href x', 'href y', 'pupil area', 'href x', 'href y', 'pupil area', 'flag', 'flag', 'quality', 'distance', 'direction', 'eccentricity'])]
rt_df = self._generate_dataframe(rt_data, col_arrays)
return rt_df
def _generate_dataframe(self, data_dict, columns):
dfs = {}
for rep, data in data_dict.iteritems():
dfs[rep] = pd.DataFrame(data)
# Combine the repetition DataFrames together into one giant DataFrame
# with appropriately labeled columns
full_df = pd.concat(dfs)
full_df.reset_index(0, inplace=True)
full_df.rename(columns={0:'time', 'level_0':'rep'}, inplace=True)
full_df = full_df.set_index(['rep', 'time'])
full_df.columns = columns
full_df.sortlevel(axis=1, inplace=True)
full_df.sortlevel(axis=0, inplace=True)
return full_df
def create_frame_df(self, frames):
df = pd.DataFrame(frames, columns=('time', 'press num'))
return df
if __name__ == '__main__':
import os
data_fpath = '../../data/raw/gaze/kre/kre_cafe.asc'
eye_data_parser = EyeDataParser(data_fpath)
print "Parsing..."
eye_data_parser.parse_data()
print "Done!\n"
print "Creating DataFrames..."
eye_dfs = EyeDataFrameCreator(eye_data_parser)
alazar_channel.py
import math
from qcodes.instrument.channel import InstrumentChannel
from qcodes.utils import validators as vals
from .alazar_multidim_parameters import Alazar0DParameter, Alazar1DParameter, Alazar2DParameter
from .acquisition_parameters import AcqVariablesParam, NonSettableDerivedParameter
class AlazarChannel(InstrumentChannel):
| """
A single channel for Alazar card. This can capture and return multiple different views of the data
An Alazar acquisition consists of one or more buffers, each buffer contains on or more records and each
records contains a number of samples. The time series (samples as a function of time) may optionally be
demodulated by a user selected frequency.
single_point: Averaged over Buffers and Records and integrated over samples
records_trace: Averaged over buffers and integrated over samples. 1D trace as a function of records.
buffers_vs_records_trace: Integrated over samples. 2D array of buffers vs records
samples_trace: Averaged over buffers and records. 1D trace as a function of samples (time)
records_vs_samples_trace: Averaged over buffers. 2D array of records vs samples
"""
def __init__(self, parent, name: str, demod: bool=False, alazar_channel: str='A',
average_buffers: bool=True,
average_records: bool=True,
integrate_samples: bool=True) -> None:
super().__init__(parent, name)
self.dimensions = 3 - int(average_buffers) - int(average_records) - int(integrate_samples)
self._average_buffers = average_buffers
self._average_records = average_records
self._integrate_samples = integrate_samples
if self.dimensions > 0:
self._stale_setpoints = True
else:
self._stale_setpoints = False
if self.dimensions >= 3:
raise RuntimeError("Alazar controller only supports up to 2 dimensional arrays")
self._demod = demod
if demod:
self.add_parameter('demod_freq',
label='demod freq',
initial_value=1e5,
vals=vals.Numbers(1e5,500e6),
get_cmd=None, set_cmd=None)
self.add_parameter('demod_type',
label='demod type',
initial_value='magnitude',
vals=vals.Enum('magnitude', 'phase'),
get_cmd=None, set_cmd=None)
self.add_parameter('alazar_channel',
label='Alazar Channel',
val_mapping={'A': 0, 'B': 1},
initial_value=alazar_channel,
get_cmd=None, set_cmd=None)
if not average_records:
self.add_parameter('records_per_buffer',
label='records_per_buffer',
initial_value=1,
vals=vals.Ints(min_value=1),
get_cmd=None, set_cmd=None)
else:
self.add_parameter('records_per_buffer',
label='records_per_buffer',
alternative='num_averages',
parameter_class=NonSettableDerivedParameter)
if not average_buffers:
self.add_parameter('buffers_per_acquisition',
label='buffers_per_acquisition',
initial_value=1,
vals=vals.Ints(min_value=1),
get_cmd=None, set_cmd=None)
else:
self.add_parameter('buffers_per_acquisition',
label='buffers_per_acquisition',
alternative='num_averages',
parameter_class=NonSettableDerivedParameter)
self.add_parameter('num_averages',
#label='num averages',
check_and_update_fn=self._update_num_avg,
default_fn= lambda : 1,
parameter_class=AcqVariablesParam)
if self.dimensions == 0:
self.add_parameter('data',
label='mydata',
unit='V',
integrate_samples=integrate_samples,
average_records=average_records,
average_buffers=average_buffers,
parameter_class=Alazar0DParameter)
elif self.dimensions == 1:
self.add_parameter('data',
label='mydata',
unit='V',
integrate_samples=integrate_samples,
average_records=average_records,
average_buffers=average_buffers,
parameter_class=Alazar1DParameter)
elif self.dimensions == 2:
self.add_parameter('data',
label='mydata',
unit='V',
integrate_samples=integrate_samples,
average_records=average_records,
average_buffers=average_buffers,
parameter_class=Alazar2DParameter)
else:
raise RuntimeError("Not implemented here")
self.acquisition_kwargs = {}
def prepare_channel(self) -> None:
if self.dimensions > 0:
self.data.set_setpoints_and_labels()
self._stale_setpoints = False
def _update_num_avg(self, value: int, **kwargs) -> None:
# allow unused **kwargs as the function may be
# called with additional unused args
if not self._average_buffers and not self._average_records:
if value==1:
return
else:
raise RuntimeError("You requested averaging but are neither averaging over buffers or records")
if self._average_buffers and not self._average_records:
self.buffers_per_acquisition._save_val(value)
elif self._average_records and not self._average_buffers:
self.records_per_buffer._save_val(value)
elif self._average_buffers and self._average_records:
max_samples = self._parent.board_info['max_samples']
samples_per_rec = self._parent.samples_per_record()
tot_samples = value * samples_per_rec
if tot_samples > max_samples:
records = math.floor(max_samples/samples_per_rec)
buffers = math.ceil(max_samples/records)
else:
records = value
buffers = 1
self.buffers_per_acquisition._save_val(buffers)
self.records_per_buffer._save_val(records)
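# Illustrative sketch only (the controller object and channel name are
# assumptions, not defined in this module): the three averaging flags select
# which view of the acquisition the `data` parameter returns.
#
#     chan = AlazarChannel(controller, 'ch_a', demod=False,
#                          average_buffers=True,
#                          average_records=False,   # keep the records axis
#                          integrate_samples=True)  # -> 1D records trace
#     chan.records_per_buffer(50)
#     chan.prepare_channel()
#     trace = chan.data()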
EditableInput.js
import React, { Component, Fragment, forwardRef } from 'react';
import { bool, func, string, oneOfType, object, shape } from 'prop-types';
import styled, { css } from 'styled-components';
import AnimatedBorder from 'components/AnimatedBorder';
import Button from 'components/Button';
import ErrorMessage from 'components/ErrorMessage';
import Spinner from 'components/Spinner';
import SuccessIcon from 'components/Icon/icons/alert/success';
import { mountainMeadow } from 'styles/colors';
import noop from 'utils/noop';
import TextInput from './TextInput';
const Wrapper = styled.div`
display: inline-block;
width: 100%;
${({ error }) =>
error &&
css`
padding-bottom: 2em;
`};
`;
const ButtonGroup = styled.div`
display: flex;
align-items: center;
padding: 0em 1em;
direction: ltr;
& > button:first-of-type {
margin-right: 0.5em;
}
`;
const propTypes = {
/** @ignore */
forwardedRef: oneOfType([func, object]),
/** @ignore */
style: shape(),
/** Label of the component */
label: string,
/** Error message of the component */
error: string,
/** Loading state of the component, show spinner if `true` */
isLoading: bool,
/** Success state of the component, show successful icon if `true` */
isSuccess: bool,
/** Text of save button */
saveLabel: string,
/** Text of cancel button */
cancelLabel: string,
/** Initial input content value, use it if you want to leave the component
* [uncontrolled](https://reactjs.org/docs/uncontrolled-components.html) */
defaultValue: string,
/** Input content value */
value: string,
/**
* Validation function, to be executed when user clicked Save button
*
* @param {string} value current value
* @returns {bool} the validation result
*/
validate: func,
/**
* Callback function, to be executed if `validate` is provided and `value` is invalid when user clicked Save button
*
* @param {string} value current value
*/
onError: func,
/**
* Callback function, to be executed when user clicked Save button
*
* @param {string} value saved value
*/
onSave: func,
/**
* Callback function, to be executed when user clicked Cancel button
*
* @param {string} value last saved value
*/
onCancel: func,
/**
* Callback function, to be executed when user type in input field
*
* @param {SyntheticEvent} event https://reactjs.org/docs/events.html
*/
onChange: func,
/**
* Callback function, to be executed when user focus on input field
*
* @param {SyntheticEvent} event https://reactjs.org/docs/events.html
*/
onFocus: func,
/**
* Callback function, to be executed when user blur on input field
*
* @param {SyntheticEvent} event https://reactjs.org/docs/events.html
*/
onBlur: func,
};
const defaultProps = {
forwardedRef: null,
style: null,
label: null,
error: null,
isLoading: false,
isSuccess: false,
saveLabel: 'Save',
cancelLabel: 'Cancel',
defaultValue: null,
value: null,
validate: () => true,
onError: noop,
onSave: noop,
onCancel: noop,
onChange: noop,
onFocus: noop,
onBlur: noop,
};
export class EditableInput extends Component {
static propTypes = propTypes;
static defaultProps = defaultProps;
state = {
oldValue: this.props.defaultValue || this.props.value,
currentValue: this.props.defaultValue || this.props.value,
isDirty: false,
isFocused: false,
};
handleSave = () => {
const { currentValue } = this.state;
const { onError, onSave, validate } = this.props;
if (validate(currentValue)) {
this.setState({ isDirty: false, oldValue: currentValue }, () =>
onSave(currentValue)
);
} else {
onError(currentValue);
}
};
handleCancel = () => {
const { oldValue } = this.state;
this.setState({ isDirty: false, currentValue: oldValue }, () =>
this.props.onCancel(oldValue)
);
};
handleChange = e => {
const currentValue = e.target.value;
const { oldValue } = this.state;
this.setState({
isDirty: currentValue !== oldValue,
currentValue,
});
this.props.onChange(e);
};
handleFocus = e => {
this.setState({ isFocused: true });
this.props.onFocus(e);
};
handleBlur = e => {
this.setState({ isFocused: false });
this.props.onBlur(e);
};
render() {
const { isDirty, isFocused, currentValue } = this.state;
const {
forwardedRef,
style,
label,
error,
saveLabel,
cancelLabel,
isLoading,
isSuccess,
onFocus,
onBlur,
onChange,
onSave,
onCancel,
value,
defaultValue,
...remainProps
} = this.props;
return (
<Wrapper style={style}>
<AnimatedBorder
label={label}
dirty={!!currentValue}
error={!!error}
focused={isFocused}
>
<TextInput
label={label}
value={currentValue}
onChange={this.handleChange}
onFocus={this.handleFocus}
onBlur={this.handleBlur}
disabled={isLoading}
{...remainProps}
ref={forwardedRef}
/>
<ButtonGroup>
{isDirty && !isLoading && (
<Fragment>
<Button onClick={this.handleCancel} variant="link">
{cancelLabel}
</Button>
<Button onClick={this.handleSave} variant="primary" solid>
{saveLabel}
</Button>
</Fragment>
)}
{isLoading && <Spinner />}
{isSuccess && <SuccessIcon color={mountainMeadow.main} />}
</ButtonGroup>
</AnimatedBorder>
<ErrorMessage error={error} />
</Wrapper>
);
}
}
// eslint-disable-next-line react/no-multi-comp
const EditableInputWithRef = forwardRef((props, ref) => (
<EditableInput {...props} forwardedRef={ref} />
));
EditableInputWithRef.displayName = 'EditableInput';
EditableInputWithRef.propTypes = propTypes;
EditableInputWithRef.defaultProps = defaultProps;
export default EditableInputWithRef;
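// Illustrative usage, not part of this file (the save handler is hypothetical):
//
//   <EditableInput
//     label="Display name"
//     defaultValue="Jane"
//     validate={value => value.trim().length > 0}
//     onError={value => console.warn('invalid value', value)}
//     onSave={value => saveDisplayName(value)} // saveDisplayName is hypothetical
//   />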
test_tnetstring.py
import unittest
import random
import math
import io
import struct
from mitmproxy.io import tnetstring
MAXINT = 2 ** (struct.Struct('i').size * 8 - 1) - 1
FORMAT_EXAMPLES = {
b'0:}': {},
b'0:]': [],
b'51:5:hello,39:11:12345678901#4:this,4:true!0:~4:\x00\x00\x00\x00,]}':
{b'hello': [12345678901, b'this', True, None, b'\x00\x00\x00\x00']},
b'5:12345#': 12345,
b'12:this is cool,': b'this is cool',
b'19:this is unicode \xe2\x98\x85;': u'this is unicode \u2605',
b'0:,': b'',
b'0:;': u'',
b'0:~': None,
b'4:true!': True,
b'5:false!': False,
b'10:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00,': b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'24:5:12345#5:67890#5:xxxxx,]': [12345, 67890, b'xxxxx'],
b'18:3:0.1^3:0.2^3:0.3^]': [0.1, 0.2, 0.3],
b'243:238:233:228:223:218:213:208:203:198:193:188:183:178:173:168:163:158:153:148:143:138:133:128:123:118:113:108:103:99:95:91:87:83:79:75:71:67:63:59:55:51:47:43:39:35:31:27:23:19:15:11:hello-there,]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]': [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[b'hello-there']]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] # noqa
}
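# The examples above spell out the tnetstring wire format: every value is
# serialized as <length>:<payload><type>, where the trailing type byte is
# ',' for bytes, ';' for unicode, '#' for integer, '^' for float, '!' for
# boolean, '~' for null, ']' for list and '}' for dict. Containers nest by
# embedding fully-serialized children, so e.g.
#   tnetstring.dumps([12345, 67890, b'xxxxx']) == b'24:5:12345#5:67890#5:xxxxx,]'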
def get_random_object(random=random, depth=0):
"""Generate a random serializable object."""
# The probability of generating a scalar value increases as the depth increases.
# This ensures that we bottom out eventually.
if random.randint(depth, 10) <= 4:
what = random.randint(0, 1)
if what == 0:
n = random.randint(0, 10)
l = []
for _ in range(n):
l.append(get_random_object(random, depth + 1))
return l
if what == 1:
n = random.randint(0, 10)
d = {}
for _ in range(n):
n = random.randint(0, 100)
k = str([random.randint(32, 126) for _ in range(n)])
d[k] = get_random_object(random, depth + 1)
return d
else:
what = random.randint(0, 4)
if what == 0:
return None
if what == 1:
return True
if what == 2:
return False
if what == 3:
if random.randint(0, 1) == 0:
return random.randint(0, MAXINT)
else:
return -1 * random.randint(0, MAXINT)
n = random.randint(0, 100)
return bytes([random.randint(32, 126) for _ in range(n)])
class Test_Format(unittest.TestCase):
def test_roundtrip_format_examples(self):
for data, expect in FORMAT_EXAMPLES.items():
self.assertEqual(expect, tnetstring.loads(data))
self.assertEqual(
expect, tnetstring.loads(tnetstring.dumps(expect)))
self.assertEqual((expect, b''), tnetstring.pop(data))
def test_roundtrip_format_random(self):
for _ in range(500):
v = get_random_object()
self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
self.assertEqual((v, b""), tnetstring.pop(tnetstring.dumps(v)))
def test_roundtrip_format_unicode(self):
for _ in range(500):
v = get_random_object()
self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
self.assertEqual((v, b''), tnetstring.pop(tnetstring.dumps(v)))
def test_roundtrip_big_integer(self):
i1 = math.factorial(30000)
s = tnetstring.dumps(i1)
i2 = tnetstring.loads(s)
self.assertEqual(i1, i2)
class Test_FileLoading(unittest.TestCase):
def test_roundtrip_file_examples(self):
for data, expect in FORMAT_EXAMPLES.items():
s = io.BytesIO()
s.write(data)
s.write(b'OK')
s.seek(0)
self.assertEqual(expect, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
s = io.BytesIO()
tnetstring.dump(expect, s)
s.write(b'OK')
s.seek(0)
self.assertEqual(expect, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
def test_roundtrip_file_random(self):
for _ in range(500):
v = get_random_object()
s = io.BytesIO()
tnetstring.dump(v, s)
s.write(b'OK')
s.seek(0)
self.assertEqual(v, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
def test_error_on_absurd_lengths(self):
s = io.BytesIO()
s.write(b'1000000000:pwned!,')
s.seek(0)
with self.assertRaises(ValueError):
tnetstring.load(s)
self.assertEqual(s.read(1), b':')
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(Test_Format))
suite.addTest(loader.loadTestsFromTestCase(Test_FileLoading))
return suite
minter_test.go
package raft
import (
"fmt"
"math/big"
"strings"
"testing"
"time"
"github.com/coreos/etcd/raft/raftpb"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" | "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
const TEST_URL = "enode://3d9ca5956b38557aba991e31cf510d4df641dce9cc26bfeb7de082f0c07abb6ede3a58410c8f249dabeecee4ad3979929ac4c7c496ad20b8cfdd061b7401b4f5@127.0.0.1:21003?discport=0&raftport=50404"
func TestSignHeader(t *testing.T) {
//create only what we need to test the seal
var testRaftId uint16 = 5
config := &node.Config{Name: "unit-test", DataDir: ""}
nodeKey := config.NodeKey()
raftProtocolManager := &ProtocolManager{raftId: testRaftId}
raftService := &RaftService{nodeKey: nodeKey, raftProtocolManager: raftProtocolManager}
minter := minter{eth: raftService}
//create some fake header to sign
fakeParentHash := common.HexToHash("0xc2c1dc1be8054808c69e06137429899d")
header := &types.Header{
ParentHash: fakeParentHash,
Number: big.NewInt(1),
Difficulty: big.NewInt(1),
GasLimit: uint64(0),
GasUsed: uint64(0),
Coinbase: minter.coinbase,
Time: uint64(time.Now().UnixNano()),
}
headerHash := header.Hash()
extraDataBytes := minter.buildExtraSeal(headerHash)
var seal *extraSeal
err := rlp.DecodeBytes(extraDataBytes[:], &seal)
if err != nil {
t.Fatalf("Unable to decode seal: %s", err.Error())
}
// Check raftId
sealRaftId, err := hexutil.DecodeUint64("0x" + string(seal.RaftId)) //add the 0x prefix
if err != nil {
t.Errorf("Unable to get RaftId: %s", err.Error())
}
if sealRaftId != uint64(testRaftId) {
t.Errorf("RaftID does not match. Expected: %d, Actual: %d", testRaftId, sealRaftId)
}
//Identify who signed it
sig := seal.Signature
pubKey, err := crypto.SigToPub(headerHash.Bytes(), sig)
if err != nil {
t.Fatalf("Unable to get public key from signature: %s", err.Error())
}
//Compare derived public key to original public key
if pubKey.X.Cmp(nodeKey.X) != 0 {
t.Errorf("Signature incorrect!")
}
}
func TestAddLearner_whenTypical(t *testing.T) {
raftService := newTestRaftService(t, 1, []uint64{1}, []uint64{})
propPeer := func() {
raftid, err := raftService.raftProtocolManager.ProposeNewPeer(TEST_URL, true)
if err != nil {
t.Errorf("propose new peer failed %v\n", err)
}
if raftid != raftService.raftProtocolManager.raftId+1 {
t.Errorf("1. wrong raft id. expected %d got %d\n", raftService.raftProtocolManager.raftId+1, raftid)
}
}
go propPeer()
select {
case confChange := <-raftService.raftProtocolManager.confChangeProposalC:
if confChange.Type != raftpb.ConfChangeAddLearnerNode {
t.Errorf("expected ConfChangeAddLearnerNode but got %s", confChange.Type.String())
}
if uint16(confChange.NodeID) != raftService.raftProtocolManager.raftId+1 {
t.Errorf("2. wrong raft id. expected %d got %d\n", raftService.raftProtocolManager.raftId+1, uint16(confChange.NodeID))
}
case <-time.After(time.Millisecond * 200):
t.Errorf("add learner conf change not received")
}
}
func TestPromoteLearnerToPeer_whenTypical(t *testing.T) {
learnerRaftId := uint16(3)
raftService := newTestRaftService(t, 2, []uint64{2}, []uint64{uint64(learnerRaftId)})
promoteToPeer := func() {
ok, err := raftService.raftProtocolManager.PromoteToPeer(learnerRaftId)
if err != nil || !ok {
t.Errorf("promote learner to peer failed %v\n", err)
}
}
go promoteToPeer()
select {
case confChange := <-raftService.raftProtocolManager.confChangeProposalC:
if confChange.Type != raftpb.ConfChangeAddNode {
t.Errorf("expected ConfChangeAddNode but got %s", confChange.Type.String())
}
if uint16(confChange.NodeID) != learnerRaftId {
t.Errorf("2. wrong raft id. expected %d got %d\n", learnerRaftId, uint16(confChange.NodeID))
}
case <-time.After(time.Millisecond * 200):
t.Errorf("add learner conf change not received")
}
}
func TestAddLearnerOrPeer_fromLearner(t *testing.T) {
raftService := newTestRaftService(t, 3, []uint64{2}, []uint64{3})
_, err := raftService.raftProtocolManager.ProposeNewPeer(TEST_URL, true)
if err == nil {
t.Errorf("learner should not be allowed to add learner or peer")
}
if err != nil && !strings.Contains(err.Error(), "learner node can't add peer or learner") {
t.Errorf("expect error message: propose new peer failed, got: %v\n", err)
}
_, err = raftService.raftProtocolManager.ProposeNewPeer(TEST_URL, false)
if err == nil {
t.Errorf("learner should not be allowed to add learner or peer")
}
if err != nil && !strings.Contains(err.Error(), "learner node can't add peer or learner") {
t.Errorf("expect error message: propose new peer failed, got: %v\n", err)
}
}
func TestPromoteLearnerToPeer_fromLearner(t *testing.T) {
learnerRaftId := uint16(3)
raftService := newTestRaftService(t, 2, []uint64{1}, []uint64{2, uint64(learnerRaftId)})
_, err := raftService.raftProtocolManager.PromoteToPeer(learnerRaftId)
if err == nil {
t.Errorf("learner should not be allowed to promote to peer")
}
if err != nil && !strings.Contains(err.Error(), "learner node can't promote to peer") {
t.Errorf("expect error message: propose new peer failed, got: %v\n", err)
}
}
func enodeId(id string, ip string, raftPort int) string {
return fmt.Sprintf("enode://%s@%s?discport=0&raftport=%d", id, ip, raftPort)
}
func peerList(url string) (error, []*enode.Node) {
var nodes []*enode.Node
node, err := enode.ParseV4(url)
if err != nil {
return fmt.Errorf("Node URL %s: %v\n", url, err), nil
}
nodes = append(nodes, node)
return nil, nodes
}
func newTestRaftService(t *testing.T, raftId uint16, nodes []uint64, learners []uint64) *RaftService {
//create only what we need to test add learner node
config := &node.Config{Name: "unit-test", DataDir: ""}
nodeKey := config.NodeKey()
enodeIdStr := fmt.Sprintf("%x", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
url := enodeId(enodeIdStr, "127.0.0.1:21001", 50401)
err, peers := peerList(url)
if err != nil {
t.Errorf("getting peers failed %v", err)
}
raftProtocolManager := &ProtocolManager{
raftId: raftId,
bootstrapNodes: peers,
confChangeProposalC: make(chan raftpb.ConfChange),
removedPeers: mapset.NewSet(),
confState: raftpb.ConfState{Nodes: nodes, Learners: learners},
}
raftService := &RaftService{nodeKey: nodeKey, raftProtocolManager: raftProtocolManager}
return raftService
} | "github.com/ethereum/go-ethereum/node" |
main.go
package main
import (
"fmt"
"sync"
"time"
"github.com/kataras/iris"
"github.com/kataras/iris/websocket"
)
type clientPage struct {
Title string
Host string
}
func main() {
app := iris.New()
app.RegisterView(iris.HTML("./templates", ".html")) // select the html engine to serve templates
ws := websocket.New(websocket.Config{})
// register the server on an endpoint.
// see the inline javascript code in the websockets.html, this endpoint is used to connect to the server.
app.Get("/my_endpoint", ws.Handler())
// serve the built-in javascript client-side library;
// see the websockets.html script tags, this path is used.
app.Any("/iris-ws.js", func(ctx iris.Context) {
ctx.Write(websocket.ClientSource)
})
app.StaticWeb("/js", "./static/js") // serve our custom javascript code
app.Get("/", func(ctx iris.Context) {
ctx.ViewData("", clientPage{"Client Page", "localhost:8080"})
ctx.View("client.html")
})
Conn := make(map[websocket.Connection]bool)
var myChatRoom = "room1"
var mutex = new(sync.Mutex)
ws.OnConnection(func(c websocket.Connection) {
c.Join(myChatRoom)
mutex.Lock()
Conn[c] = true
mutex.Unlock()
c.On("chat", func(message string) {
if message == "leave" {
c.Leave(myChatRoom)
c.To(myChatRoom).Emit("chat", "Client with ID: "+c.ID()+" left from the room and cannot send or receive message to/from this room.")
c.Emit("chat", "You have left from the room: "+myChatRoom+" you cannot send or receive any messages from others inside that room.")
return
}
})
c.OnDisconnect(func() {
mutex.Lock()
delete(Conn, c)
mutex.Unlock()
fmt.Printf("\nConnection with ID: %s has been disconnected!\n", c.ID())
})
})
var delay = 1 * time.Second
go func() {
i := 0
for {
mutex.Lock()
broadcast(Conn, fmt.Sprintf("aaaa %d\n", i))
mutex.Unlock()
time.Sleep(delay)
i++
}
}()
go func() {
i := 0
for range time.Tick(1 * time.Second) { //another way to get clock signal
mutex.Lock()
broadcast(Conn, fmt.Sprintf("aaaa2 %d\n", i))
mutex.Unlock()
time.Sleep(delay)
i++
}
}()
app.Run(iris.Addr(":8080"))
}
func broadcast(Conn map[websocket.Connection]bool, message string) {
for k := range Conn {
k.To("room1").Emit("chat", message)
}
}
|
xmlrs_reader_tests.rs
extern crate quick_xml;
use quick_xml::events::{BytesStart, Event};
use quick_xml::{Reader, Result};
use std::borrow::Cow;
use std::str::from_utf8;
#[test]
fn sample_1_short() {
test(
include_str!("documents/sample_1.xml"),
include_str!("documents/sample_1_short.txt"),
true,
);
}
#[test]
fn sample_1_full() {
test(
include_str!("documents/sample_1.xml"),
include_str!("documents/sample_1_full.txt"),
false,
);
}
#[test]
fn sample_2_short() {
test(
include_str!("documents/sample_2.xml"),
include_str!("documents/sample_2_short.txt"),
true,
);
}
#[test]
fn sample_2_full() {
test(
include_str!("documents/sample_2.xml"),
include_str!("documents/sample_2_full.txt"),
false,
);
}
#[cfg(feature = "escape-html")]
#[test]
fn html5() {
test(
include_str!("documents/html5.html"),
include_str!("documents/html5.txt"),
false,
);
}
#[test]
fn escaped_characters() {
test(
r#"<e attr=""Hello"">'a' < '&'</e>"#,
r#"
|StartElement(e [attr=""Hello""])
|Characters('a' < '&')
|EndElement(e)
|EndDocument
"#,
true,
)
}
#[cfg(feature = "escape-html")]
#[test]
fn escaped_characters_html() {
test(
r#"<e attr="ℏÈℓ𝕝⨀">╔╗╔╗╔╗</e>"#,
r#"
|StartElement(e [attr="ℏÈℓ𝕝⨀"])
|Characters(╔╗╔╗╔╗)
|EndElement(e)
|EndDocument
"#,
true,
)
}
#[cfg(feature = "encoding")]
#[test]
fn encoded_characters() {
test_bytes(
b"\
<?xml version = \"1.0\" encoding = \"Shift_JIS\" ?>\n\
<a>\x82\xA0\x82\xA2\x82\xA4</a>\
",
"
|StartDocument(1.0, Shift_JIS)
|StartElement(a)
|Characters(あいう)
|EndElement(a)
|EndDocument
"
.as_bytes(),
true,
)
}
// #[test]
// fn sample_3_short() {
// test(
// include_str!("documents/sample_3.xml"),
// include_str!("documents/sample_3_short.txt"),
// true
// );
// }
// #[test]
// fn sample_3_full() {
// test(
// include_str!("documents/sample_3.xml"),
// include_str!("documents/sample_3_full.txt"),
// false
// );
// }
// #[test]
// fn sample_4_short() {
// test(
// include_str!("documents/sample_4.xml"),
// include_str!("documents/sample_4_short.txt"),
// true
// );
// }
// #[test]
// fn sample_4_full() {
// test(
// include_str!("documents/sample_4.xml"),
// include_str!("documents/sample_4_full.txt"),
// false
// );
//
// }
#[test]
// FIXME: Trips on the first byte-order-mark byte
// Expected: StartDocument(1.0, utf-16)
// Found: InvalidUtf8([255, 254]; invalid utf-8 sequence of 1 bytes from index 0)
#[ignore]
fn sample_5_short() {
test_bytes(
include_bytes!("documents/sample_5_utf16bom.xml"),
include_bytes!("documents/sample_5_short.txt"),
true,
);
}
#[test]
fn sample_ns_short() {
test(
include_str!("documents/sample_ns.xml"),
include_str!("documents/sample_ns_short.txt"),
true,
);
}
#[test]
fn eof_1() {
test(
r#"<?xml"#,
r#"Error: Unexpected EOF during reading XmlDecl."#,
true,
);
}
#[test]
fn bad_1() {
test(
r#"<?xml&.,"#,
r#"1:6 Error: Unexpected EOF during reading XmlDecl."#,
true,
);
}
#[test]
fn dashes_in_comments() {
test(
r#"<!-- comment -- --><hello/>"#,
r#"
|Error: Unexpected token '--'
"#,
true,
);
test(
r#"<!-- comment ---><hello/>"#,
r#"
|Error: Unexpected token '--'
"#,
true,
);
}
#[test]
fn tabs_1() {
test(
"\t<a>\t<b/></a>",
r#"
StartElement(a)
EmptyElement(b)
EndElement(a)
EndDocument
"#,
true,
);
}
#[test]
fn issue_83_duplicate_attributes() {
// Error when parsing attributes won't stop main event reader
// as it is a lazy operation => add ending events
test(
r#"<hello><some-tag a='10' a="20"/></hello>"#,
"
|StartElement(hello)
|1:30 EmptyElement(some-tag, attr-error: error while parsing \
attribute at position 16: Duplicate attribute at position 9 and 16)
|EndElement(hello)
|EndDocument
",
true,
);
}
#[test]
fn issue_93_large_characters_in_entity_references() {
test(
r#"<hello>&𤶼;</hello>"#,
r#"
|StartElement(hello)
|1:10 FailedUnescape([38, 240, 164, 182, 188, 59]; Error while escaping character at range 1..5: Unrecognized escape symbol: Ok("𤶼"))
|EndElement(hello)
|EndDocument
"#,
true,
)
}
#[test]
fn issue_98_cdata_ending_with_right_bracket() {
test(
r#"<hello><![CDATA[Foo [Bar]]]></hello>"#,
r#"
|StartElement(hello)
|Characters()
|CData(Foo [Bar])
|Characters()
|EndElement(hello)
|EndDocument
"#,
false,
)
}
#[test]
fn issue_105_unexpected_double_dash() {
test(
r#"<hello>-- </hello>"#,
r#"
|StartElement(hello)
|Characters(-- )
|EndElement(hello)
|EndDocument
"#,
false,
);
test(
r#"<hello>--</hello>"#,
r#"
|StartElement(hello)
|Characters(--)
|EndElement(hello)
|EndDocument
"#,
false,
);
test(
r#"<hello>--></hello>"#,
r#"
|StartElement(hello)
|Characters(-->)
|EndElement(hello)
|EndDocument
"#,
false,
);
test(
r#"<hello><![CDATA[--]]></hello>"#,
r#"
|StartElement(hello)
|Characters()
|CData(--)
|Characters()
|EndElement(hello)
|EndDocument
"#,
false,
);
}
#[test]
fn issue_attributes_have_no_default_namespace() {
// At the moment, the 'test' method doesn't render namespaces for attribute names.
// This test only checks whether the default namespace got applied to the EmptyElement.
test(
r#"<hello xmlns="urn:foo" x="y"/>"#,
r#"
|EmptyElement({urn:foo}hello [x="y"])
|EndDocument
"#,
true,
);
}
#[test]
fn issue_default_namespace_on_outermost_element() {
// Regression test
test(
r#"<hello xmlns="urn:foo"/>"#,
r#"
|EmptyElement({urn:foo}hello)
|EndDocument
"#,
true,
);
}
#[test]
fn default_namespace_applies_to_end_elem() {
test(
r#"<hello xmlns="urn:foo" x="y">
<inner/>
</hello>"#,
r#"
|StartElement({urn:foo}hello [x="y"])
|EmptyElement({urn:foo}inner)
|EndElement({urn:foo}hello)
|EndDocument
"#,
true,
);
}
fn test(input: &str, output: &str, is_short: bool) {
test_bytes(input.as_bytes(), output.as_bytes(), is_short);
}
fn test_bytes(input: &[u8], output: &[u8], is_short: bool) {
// Normalize newlines on Windows to just \n, which is what the reader and
// writer use.
// let input = input.replace("\r\n", "\n");
// let input = input.as_bytes();
// let output = output.replace("\r\n", "\n");
// let output = output.as_bytes();
let mut reader = Reader::from_reader(input);
reader
.trim_text(is_short)
.check_comments(true)
.expand_empty_elements(false);
let mut spec_lines = SpecIter(output).enumerate();
let mut buf = Vec::new();
let mut ns_buffer = Vec::new();
if !is_short {
// discard first whitespace
reader.read_event(&mut buf).unwrap();
}
loop {
buf.clear();
let event = reader.read_namespaced_event(&mut buf, &mut ns_buffer);
let line = xmlrs_display(&event, &reader);
if let Some((n, spec)) = spec_lines.next() {
if spec.trim() == "EndDocument" {
break;
}
if line.trim() != spec.trim() {
panic!(
"\n-------------------\n\
Unexpected event at line {}:\n\
Expected: {}\nFound: {}\n\
-------------------\n",
n + 1,
spec,
line
);
}
} else {
if line == "EndDocument" {
break;
| ent: {}", line);
}
if !is_short && line.starts_with("StartDocument") {
// advance next Characters(empty space) ...
if let Ok(Event::Text(ref e)) = reader.read_event(&mut Vec::new()) {
if e.iter().any(|b| match *b {
b' ' | b'\r' | b'\n' | b'\t' => false,
_ => true,
}) {
panic!("Reader expects empty Text event after a StartDocument");
}
} else {
panic!("Reader expects empty Text event after a StartDocument");
}
}
}
}
fn namespace_name(n: &Option<&[u8]>, name: &[u8]) -> String {
match *n {
Some(n) => format!("{{{}}}{}", from_utf8(n).unwrap(), from_utf8(name).unwrap()),
None => from_utf8(name).unwrap().to_owned(),
}
}
fn make_attrs(e: &BytesStart) -> ::std::result::Result<String, String> {
let mut atts = Vec::new();
for a in e.attributes() {
match a {
Ok(a) => {
if a.key.len() < 5 || !a.key.starts_with(b"xmlns") {
atts.push(format!(
"{}=\"{}\"",
from_utf8(a.key).unwrap(),
from_utf8(&*a.unescaped_value().unwrap()).unwrap()
));
}
}
Err(e) => return Err(e.to_string()),
}
}
Ok(atts.join(", "))
}
// FIXME: The public API differs based on the "encoding" feature
fn decode<'a>(text: &'a [u8], reader: &Reader<&[u8]>) -> Cow<'a, str> {
#[cfg(feature = "encoding")]
let decoded = reader.decode(text);
#[cfg(not(feature = "encoding"))]
let decoded = Cow::Borrowed(reader.decode(text).unwrap());
decoded
}
fn xmlrs_display(opt_event: &Result<(Option<&[u8]>, Event)>, reader: &Reader<&[u8]>) -> String {
match opt_event {
Ok((ref n, Event::Start(ref e))) => {
let name = namespace_name(n, decode(e.name(), reader).as_bytes());
match make_attrs(e) {
Ok(ref attrs) if attrs.is_empty() => format!("StartElement({})", &name),
Ok(ref attrs) => format!("StartElement({} [{}])", &name, &attrs),
Err(e) => format!("StartElement({}, attr-error: {})", &name, &e),
}
}
Ok((ref n, Event::Empty(ref e))) => {
let name = namespace_name(n, decode(e.name(), reader).as_bytes());
match make_attrs(e) {
Ok(ref attrs) if attrs.is_empty() => format!("EmptyElement({})", &name),
Ok(ref attrs) => format!("EmptyElement({} [{}])", &name, &attrs),
Err(e) => format!("EmptyElement({}, attr-error: {})", &name, &e),
}
}
Ok((ref n, Event::End(ref e))) => {
let name = namespace_name(n, decode(e.name(), reader).as_bytes());
format!("EndElement({})", name)
}
Ok((_, Event::Comment(ref e))) => format!("Comment({})", from_utf8(e).unwrap()),
Ok((_, Event::CData(ref e))) => format!("CData({})", from_utf8(e).unwrap()),
Ok((_, Event::Text(ref e))) => match e.unescaped() {
Ok(c) => match from_utf8(decode(&*c, reader).as_bytes()) {
Ok(c) => format!("Characters({})", c),
Err(ref err) => format!("InvalidUtf8({:?}; {})", e.escaped(), err),
},
Err(ref err) => format!("FailedUnescape({:?}; {})", e.escaped(), err),
},
Ok((_, Event::Decl(ref e))) => {
let version_cow = e.version().unwrap();
let version = from_utf8(version_cow.as_ref()).unwrap();
let encoding_cow = e.encoding().unwrap().unwrap();
let encoding = from_utf8(encoding_cow.as_ref()).unwrap();
format!("StartDocument({}, {})", version, encoding)
}
Ok((_, Event::Eof)) => format!("EndDocument"),
Ok((_, Event::PI(ref e))) => format!("ProcessingInstruction(PI={})", from_utf8(e).unwrap()),
Err(ref e) => format!("Error: {}", e),
Ok((_, Event::DocType(ref e))) => format!("DocType({})", from_utf8(e).unwrap()),
}
}
struct SpecIter<'a>(&'a [u8]);
impl<'a> Iterator for SpecIter<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
let start = self
.0
.iter()
.position(|b| !matches!(*b, b' ' | b'\r' | b'\n' | b'\t' | b'|' | b':' | b'0'..=b'9'))
.unwrap_or(0);
if let Some(p) = self.0.windows(3).position(|w| w == b")\r\n") {
let (prev, next) = self.0.split_at(p + 1);
self.0 = &next[1..];
Some(from_utf8(&prev[start..]).expect("Error decoding to utf8"))
} else if let Some(p) = self.0.windows(2).position(|w| w == b")\n") {
let (prev, next) = self.0.split_at(p + 1);
self.0 = next;
Some(from_utf8(&prev[start..]).expect("Error decoding to utf8"))
} else if self.0.is_empty() {
None
} else {
let p = self.0;
self.0 = &[];
Some(from_utf8(&p[start..]).unwrap())
}
}
}
panic!("Unexpected ev |
task_group_definition.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskGroupDefinition(Model):
"""TaskGroupDefinition.
:param display_name:
:type display_name: str
:param is_expanded:
:type is_expanded: bool
:param name:
:type name: str
:param tags:
:type tags: list of str
:param visible_rule:
:type visible_rule: str
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'is_expanded': {'key': 'isExpanded', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'visible_rule': {'key': 'visibleRule', 'type': 'str'}
}
def __init__(self, display_name=None, is_expanded=None, name=None, tags=None, visible_rule=None):
super(TaskGroupDefinition, self).__init__()
self.display_name = display_name
self.is_expanded = is_expanded
self.name = name
self.tags = tags
self.visible_rule = visible_rule
|
no-content.exception.spec.ts
import { NoContentException } from './no-content.exception';
describe('NoContentException', () => {
let exception: NoContentException;
beforeEach(async () => {
exception = new NoContentException();
});
it('should initialize', () => {
expect(exception).toBeDefined();
expect(exception.getResponse()).toBeDefined();
expect(exception.getStatus()).toBeDefined();
});
});
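// For reference, a minimal implementation consistent with this spec, assuming
// NestJS's HttpException; the real './no-content.exception' may differ:
//
//   import { HttpException, HttpStatus } from '@nestjs/common';
//
//   export class NoContentException extends HttpException {
//     constructor() {
//       super('No Content', HttpStatus.NO_CONTENT);
//     }
//   }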
|
maskformer.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
from mmdet.core import INSTANCE_OFFSET
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_."""
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
panoptic_head.update(train_cfg=train_cfg)
panoptic_head.update(test_cfg=test_cfg)
self.panoptic_head = build_head(panoptic_head)
self.num_things_classes = self.panoptic_head.num_things_classes
self.num_stuff_classes = self.panoptic_head.num_stuff_classes
self.num_classes = self.panoptic_head.num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_dummy(self, img, img_metas):
"""Used for computing network flops. See
`mmdetection/tools/analysis_tools/get_flops.py`
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
outs = self.panoptic_head(x, img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_semantic_seg,
gt_bboxes_ignore=None,
**kargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
gt_masks (list[BitmapMasks]): true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (list[tensor]): semantic segmentation mask for
images.
gt_bboxes_ignore (list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Defaults to None.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# add batch_input_shape in img_metas
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_semantic_seg,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, **kwargs):
"""Test without augmentation."""
feat = self.extract_feat(img)
mask_results = self.panoptic_head.simple_test(feat, img_metas,
**kwargs)
results = []
for mask in mask_results:
result = {'pan_results': mask.detach().cpu().numpy()}
results.append(result)
return results
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError
def onnx_export(self, img, img_metas):
raise NotImplementedError
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
# keep objects ahead
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
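# Illustrative inference sketch (config/checkpoint paths are hypothetical; the
# high-level mmdet API is assumed to be available):
#
#     from mmdet.apis import inference_detector, init_detector
#
#     model = init_detector('maskformer_config.py', 'maskformer.pth')
#     result = inference_detector(model, 'demo.jpg')
#     model.show_result('demo.jpg', result, out_file='demo_panoptic.jpg')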
vald.go
//
// Copyright (C) 2019-2020 Vdaas.org Vald team ( kpango, rinx, kmrmt )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package vald provides vald starter functionality
package vald
import (
"context"
"testing"
"github.com/vdaas/vald/hack/benchmark/internal/starter"
)
type server struct {
// cfg *config.Data
}
func New(opts ...Option) starter.Starter {
srv := new(server)
for _, opt := range append(defaultOptions, opts...) {
opt(srv)
}
return srv
}
func (s *server) Run(ctx context.Context, tb testing.TB) func() {
tb.Helper()
// TODO (@hlts2): Make when divided gateway.
return func() {}
}
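// For context, a minimal shape of the functional-option machinery `New`
// assumes; `Option` and `defaultOptions` live elsewhere in this package, so
// this sketch is an assumption, not their actual definition:
//
//	type Option func(*server)
//
//	var defaultOptions = []Option{}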
trace.go
package trace
import (
"context"
gcpexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/semconv"
otrace "go.opentelemetry.io/otel/trace"
"github.com/ditointernet/go-dito/lib/errors"
)
// Params encodes necessary input data to initialize a new Tracer.
type Params struct {
IsProductionEnvironment bool
ApplicationName string
// TraceRatio indicates how often the system should collect traces.
// Use it with caution: it may overload the system, and keeping its value too high may be too expensive to maintain in a high-throughput system.
// Values vary between 0 and 1, with 0 meaning No Sampling and 1 meaning Always Sampling.
// Values lower than 0 are treated as 0 and values greater than 1 are treated as 1.
TraceRatio float64
}
// NewTracer creates a new Tracer.
// It produces the tracer itself and a flush function that should be used to deliver any trace residue in cases of system shutdown.
// If your application is running outside of Google Cloud, make sure that your `GOOGLE_APPLICATION_CREDENTIALS` env variable is properly set.
func NewTracer(params Params) (otrace.Tracer, func(context.Context) error, error) {
if params.ApplicationName == "" {
return nil, nil, errors.NewMissingRequiredDependency("ApplicationName")
}
tOpts := []sdktrace.TracerProviderOption{
sdktrace.WithResource(resource.NewWithAttributes(
attribute.KeyValue{
Key: semconv.ServiceNameKey,
Value: attribute.StringValue(params.ApplicationName),
},
)),
}
if params.IsProductionEnvironment {
exporter, err := gcpexporter.NewExporter()
if err != nil {
return nil, nil, err
}
tOpts = append(tOpts, sdktrace.WithSampler(sdktrace.TraceIDRatioBased(params.TraceRatio)))
tOpts = append(tOpts, sdktrace.WithBatcher(exporter))
}
tp := sdktrace.NewTracerProvider(tOpts...)
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
propagation.TraceContext{},
propagation.Baggage{},
))
return tp.Tracer(params.ApplicationName), tp.Shutdown, nil
}
// MustNewTracer creates a new Tracer.
// It produces the tracer itself and a flush function that should be used to deliver any trace residue in cases of system shutdown.
// It panics if any error is found during tracer construction.
// If your application is running outside of Google Cloud, make sure that your `GOOGLE_APPLICATION_CREDENTIALS` env variable is properly set.
func MustNewTracer(params Params) (otrace.Tracer, func(context.Context) error) {
tracer, flush, err := NewTracer(params)
if err != nil {
panic(err)
}
return tracer, flush
}
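// Illustrative wiring sketch (service name and ratio are hypothetical):
//
//	tracer, flush := MustNewTracer(Params{
//		IsProductionEnvironment: true,
//		ApplicationName:         "my-service",
//		TraceRatio:              0.1,
//	})
//	defer flush(context.Background())
//
//	ctx, span := tracer.Start(context.Background(), "handle-request")
//	defer span.End()
//	_ = ctx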
| {
if params.ApplicationName == "" {
return nil, nil, errors.NewMissingRequiredDependency("ApplicationName")
}
tOpts := []sdktrace.TracerProviderOption{
sdktrace.WithResource(resource.NewWithAttributes(
attribute.KeyValue{
Key: semconv.ServiceNameKey,
Value: attribute.StringValue(params.ApplicationName),
},
)),
}
if params.IsProductionEnvironment {
exporter, err := gcpexporter.NewExporter()
if err != nil {
return nil, nil, err
}
tOpts = append(tOpts, sdktrace.WithSampler(sdktrace.TraceIDRatioBased(params.TraceRatio)))
tOpts = append(tOpts, sdktrace.WithBatcher(exporter))
}
tp := sdktrace.NewTracerProvider(tOpts...)
otel.SetTracerProvider(tp)
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
propagation.TraceContext{},
propagation.Baggage{},
))
return tp.Tracer(params.ApplicationName), tp.Shutdown, nil
} |
c_api.rs
use libc::*;
use std::ptr;
#[repr(C)]
#[allow(dead_code, non_camel_case_types)]
pub enum EcBackendId {
NULL = 0,
JERASURE_RS_VAND = 1,
JERASURE_RS_CAUCHY = 2,
FLAT_XOR_HD = 3,
ISA_L_RS_VAND = 4,
SHSS = 5,
LIBERASURECODE_RS_VAND = 6,
}
#[repr(C)]
pub enum EcChecksumType {
NONE = 1,
CRC32 = 2,
MD5 = 3,
}
#[repr(C)]
pub struct EcArgs {
pub k: c_int,
pub m: c_int,
pub w: c_int,
pub hd: c_int,
pub priv_args: [u64; 5],
pub ct: EcChecksumType,
}
pub type Desc = c_int;
pub type ErrorCode = c_uint;
pub const EBACKENDNOTSUPP: u32 = 200;
pub const EECMETHODNOTIMPL: u32 = 201;
pub const EBACKENDINITERR: u32 = 202;
pub const EBACKENDINUSE: u32 = 203;
pub const EBACKENDNOTAVAIL: u32 = 204;
pub const EBADCHKSUM: u32 = 205;
pub const EINVALIDPARAMS: u32 = 206;
pub const EBADHEADER: u32 = 207;
pub const EINSUFFFRAGS: u32 = 208;
#[link(name = "erasurecode", kind = "static")]
#[link(name = "gf_complete", kind = "static")]
#[link(name = "Jerasure", kind = "static")]
#[link(name = "Xorcode", kind = "static")]
extern "C" {
/// Create a liberasurecode instance and return a descriptor
/// for use with EC operations (encode, decode, reconstruct)
///
/// @param id - one of the supported backends as
/// defined by ec_backend_id_t
/// @param ec_args - arguments to the EC backend
/// arguments common to all backends
/// k - number of data fragments
/// m - number of parity fragments
/// w - word size, in bits
/// hd - hamming distance (=m for Reed-Solomon)
/// ct - fragment checksum type (stored with the fragment metadata)
/// backend-specific arguments
/// null_args - arguments for the null backend
/// flat_xor_hd, jerasure do not require any special args
///
/// @return liberasurecode instance descriptor (int > 0)
///
fn liberasurecode_instance_create(id: EcBackendId, args: *const EcArgs) -> Desc;
/// Close a liberasurecode instance
///
/// @param desc - liberasurecode descriptor to close
///
/// @return 0 on success, otherwise non-zero error code
///
fn liberasurecode_instance_destroy(desc: Desc) -> c_int;
/// Erasure encode a data buffer
///
/// @param desc - liberasurecode descriptor/handle
/// from liberasurecode_instance_create()
/// @param orig_data - data to encode
/// @param orig_data_size - length of data to encode
/// @param encoded_data - pointer to _output_ array (char **) of k data
/// fragments (char *), allocated by the callee
/// @param encoded_parity - pointer to _output_ array (char **) of m parity
/// fragments (char *), allocated by the callee
/// @param fragment_len - pointer to _output_ length of each fragment, assuming
/// all fragments are the same length
///
/// @return 0 on success, -error code otherwise
///
fn liberasurecode_encode(
desc: Desc,
orig_data: *const u8,
orig_data_size: u64,
encoded_data: *mut *mut *mut u8,
encoded_parity: *mut *mut *mut u8,
fragment_len: *mut u64,
) -> c_int;
/// Cleanup structures allocated by librasurecode_encode
///
/// The caller has no context, so cannot safely free memory
/// allocated by liberasurecode, so it must pass the
/// deallocation responsibility back to liberasurecode.
///
/// @param desc - liberasurecode descriptor/handle
/// from liberasurecode_instance_create()
/// @param encoded_data - (char **) array of k data
/// fragments (char *), allocated by liberasurecode_encode
/// @param encoded_parity - (char **) array of m parity
/// fragments (char *), allocated by liberasurecode_encode
///
/// @return 0 on success; -error otherwise
///
fn liberasurecode_encode_cleanup(
desc: Desc,
encoded_data: *mut *mut u8,
encoded_parity: *mut *mut u8,
) -> c_int;
/// Reconstruct original data from a set of k encoded fragments
///
/// @param desc - liberasurecode descriptor/handle
/// from liberasurecode_instance_create()
/// @param fragments - erasure encoded fragments (> = k)
/// @param num_fragments - number of fragments being passed in
/// @param fragment_len - length of each fragment (assume they are the same)
/// @param force_metadata_checks - force fragment metadata checks (default: 0)
/// @param out_data - _output_ pointer to decoded data
/// @param out_data_len - _output_ length of decoded output
/// (both output data pointers are allocated by liberasurecode,
/// caller invokes liberasurecode_decode_clean() after it has
/// read decoded data in 'out_data')
///
/// @return 0 on success, -error code otherwise
///
fn liberasurecode_decode(
desc: Desc,
fragments: *const *const u8,
num_fragments: c_int,
fragment_len: u64,
force_metadata_checks: c_int,
out_data: *mut *mut u8,
out_data_len: *mut u64,
) -> c_int;
/// Cleanup structures allocated by librasurecode_decode
///
/// The caller has no context, so cannot safely free memory
/// allocated by liberasurecode, so it must pass the
/// deallocation responsibility back to liberasurecode.
///
/// @param desc - liberasurecode descriptor/handle
/// from liberasurecode_instance_create()
/// @param data - (char *) buffer of data decoded by librasurecode_decode
///
/// @return 0 on success; -error otherwise
///
fn liberasurecode_decode_cleanup(desc: Desc, data: *mut u8) -> c_int;
/// Reconstruct a missing fragment from a subset of available fragments
///
/// @param desc - liberasurecode descriptor/handle
/// from liberasurecode_instance_create()
/// @param available_fragments - erasure encoded fragments
/// @param num_fragments - number of fragments being passed in
/// @param fragment_len - size in bytes of the fragments
/// @param destination_idx - missing idx to reconstruct
/// @param out_fragment - output of reconstruct
///
/// @return 0 on success, -error code otherwise
///
fn liberasurecode_reconstruct_fragment(
desc: Desc,
available_fragments: *const *const u8,
num_fragments: c_int,
fragment_len: u64,
destination_idx: c_int,
out_fragment: *mut u8,
) -> Desc;
}
pub fn instance_create(id: EcBackendId, args: &EcArgs) -> Result<Desc, ErrorCode> {
match unsafe { liberasurecode_instance_create(id, args) } {
desc if desc > 0 => Ok(desc),
code => Err(-code as ErrorCode),
}
}
pub fn instance_destroy(desc: Desc) -> Result<(), ErrorCode> {
match unsafe { liberasurecode_instance_destroy(desc) } {
0 => Ok(()),
code => Err(code as ErrorCode),
}
}
pub fn encode(
desc: Desc,
orig_data: &[u8],
) -> Result<(*mut *mut u8, *mut *mut u8, u64), ErrorCode> {
let mut encoded_data = ptr::null_mut();
let mut encoded_parity = ptr::null_mut();
let mut fragment_len = 0;
let result = unsafe {
liberasurecode_encode(
desc,
orig_data.as_ptr(),
orig_data.len() as u64,
&mut encoded_data,
&mut encoded_parity,
&mut fragment_len,
)
};
match result {
0 => Ok((encoded_data, encoded_parity, fragment_len)),
_ => Err(-result as ErrorCode),
}
}
pub fn encode_cleanup(
desc: Desc,
encoded_data: *mut *mut u8,
encoded_parity: *mut *mut u8,
) -> Result<(), ErrorCode> {
    match unsafe { liberasurecode_encode_cleanup(desc, encoded_data, encoded_parity) } {
        0 => Ok(()),
        code => Err(-code as ErrorCode),
    }
}
pub fn decode(
desc: Desc,
fragments: &[&[u8]],
force_metadata_checks: bool,
) -> Result<(*mut u8, u64), ErrorCode> {
assert!(!fragments.is_empty());
let mut out_data = ptr::null_mut();
let mut out_data_len = 0;
let result = unsafe {
liberasurecode_decode(
desc,
fragments
.iter()
.map(|x| x.as_ptr())
.collect::<Vec<_>>()
.as_ptr(),
fragments.len() as c_int,
fragments[0].len() as u64,
if force_metadata_checks { 1 } else { 0 },
&mut out_data,
&mut out_data_len,
)
};
match result {
0 => Ok((out_data, out_data_len)),
_ => Err(-result as ErrorCode),
}
}
pub fn decode_cleanup(desc: Desc, data: *mut u8) -> Result<(), ErrorCode> {
match unsafe { liberasurecode_decode_cleanup(desc, data) } {
0 => Ok(()),
code => Err(-code as ErrorCode),
}
}
pub fn reconstruct_fragment(
desc: Desc,
available_fragments: &[&[u8]],
destination_idx: usize,
) -> Result<Vec<u8>, ErrorCode> {
assert!(!available_fragments.is_empty());
let mut buf = vec![0; available_fragments[0].len()];
let result = unsafe {
liberasurecode_reconstruct_fragment(
desc,
available_fragments
.iter()
.map(|x| x.as_ptr())
.collect::<Vec<_>>()
.as_ptr(),
available_fragments.len() as c_int,
available_fragments[0].len() as u64,
destination_idx as c_int,
buf.as_mut_ptr(),
)
};
match result {
0 => Ok(buf),
_ => Err(-result as ErrorCode),
}
}
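// Example round trip (a sketch only; the 10+4 Reed-Solomon geometry is an
// assumption, and fragment contents must be copied out before the cleanup calls):
//
//     let args = EcArgs { k: 10, m: 4, w: 8, hd: 5, priv_args: [0; 5], ct: EcChecksumType::NONE };
//     let desc = instance_create(EcBackendId::LIBERASURECODE_RS_VAND, &args)?;
//     let (data, parity, frag_len) = encode(desc, b"some payload")?;
//     encode_cleanup(desc, data, parity)?;
//     instance_destroy(desc)?;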
extract.py
## Basic Python libraries
import os
from PIL import Image
## Deep learning and array processing libraries
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
## Inner-project imports
from model import EncoderCNN, DecoderRNN
##### Code begins #####
# Paths to the image and model directories
image_directory = './CNN/images/'
network_directory = './CNN/models/'
# Setting up other necessary paths
encoder_path = f'{network_directory}encoder-5-3000.pkl'
# Define the compute device (either GPU or CPU)
if torch.cuda.is_available():
compute_device = torch.device('cuda:0')
else:
compute_device = torch.device('cpu')
print(f'Using device: {compute_device}')
# Create the data transforms for evaluating
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Configure network
network = EncoderCNN(embed_size=256)
network = network.eval()
network.load_state_dict(torch.load(encoder_path, map_location='cpu'))
network = network.to(compute_device)
def get_visual_features(img):
    """
    Extracts the visual features from an input image. Converts input
    into PIL Image, normalizes the image, then feeds it through a CNN.
    The features returned from the CNN are then pooled into a 1x512x1x1
    and finally squeezed to produce our [512] array output.

    Input
        img :: 3D NumPy array
            Takes a [x, y, 3] NumPy array to be converted into a PIL Image
    Output
        features :: 1D NumPy array
            Returns a [512] NumPy array of the visual features from the CNN
    """
    # Convert to PIL Image and perform transformation
    img = Image.fromarray(img).convert('RGB')
    img = img.resize([224, 224], Image.LANCZOS)
    img = transform(img)

    # Add a 4th dimension and send to compute device (GPU or CPU)
    img = img.unsqueeze(0)
    img = img.to(compute_device)

    # Feed input through CNN
    features = network(img)

    # Squeeze into a [512] vector
    features = features.squeeze()

    # Convert to NumPy
    features = features.cpu().detach().numpy()
    return features
# Below is only there for testing, commented out for now
"""
if __name__ == '__main__':
# Inference
img = Image.open(f'{image_directory}input/1.png')
img = np.asarray(img)
features = get_visual_features(img)
print('End')
""" | """
Extracts the visual features from an input image. Converts input
into PIL Image, normalizes the image, then feeds it through a CNN.
The features returned from the CNN are then pooled into a 1x512x1x1
and finally squeezed to produce our [512] array output.
Input
img :: 3D NumPy array
Takes a [x, y, 3] NumPy array to be converted into a PIL Image
Output
features :: 1D NumPy array
Returns a [512] NumPy array of the visual features from the CNN
"""
# Convert to PIL Image and perform transformation
img = Image.fromarray(img).convert('RGB')
img = img.resize([224, 224], Image.LANCZOS)
img = transform(img)
# Add a 4th dimension and send to compute device (GPU or CPU)
img = img.unsqueeze(0)
img = img.to(compute_device)
# Feed input through CNN
features = network(img)
# Squeeze into a [512] vector
features = features.squeeze()
# Convert to NumPy
features = features.cpu().detach().numpy()
return features |
web-view-page.js
const ListViewLinksModel = require("../../links-view-model");
const link = require("../../link");
const navigationLinks = [
new link("BasicWebView", "ns-ui-widgets-category/web-view/basics/basics-page"),
new link("WebView source", "ns-ui-widgets-category/web-view/source-load/source-load-page"),
new link("Gestures", "ns-ui-widgets-category/web-view/gestures/gestures-page")
];
function onNavigatingTo(args) {
    const page = args.object;
    page.bindingContext = new ListViewLinksModel({
        links: navigationLinks,
        actionBarTitle: args.context.title
    });
}
exports.onNavigatingTo = onNavigatingTo;
alter.directive.spec.ts
import { AlterDirective } from './alter.directive';
describe('AlterDirective', () => {
it('should create an instance', () => {
const directive = new AlterDirective();
    expect(directive).toBeTruthy();
  });
});
cli.rs
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
//! Functionality related to the command line interface of the Move prover.
use anyhow::anyhow;
use clap::{App, Arg};
use docgen::docgen::DocgenOptions;
use log::LevelFilter;
use serde::{Deserialize, Serialize};
use simplelog::{
CombinedLogger, Config, ConfigBuilder, LevelPadding, SimpleLogger, TermLogger, TerminalMode,
};
use std::sync::atomic::{AtomicBool, Ordering};
/// Represents the virtual path to the boogie prelude which is inlined into the binary.
pub const INLINE_PRELUDE: &str = "<inline-prelude>";
/// Default flags passed to boogie. Additional flags will be added to this via the -B option.
const DEFAULT_BOOGIE_FLAGS: &[&str] = &[
"-doModSetAnalysis",
"-noinfer",
"-printVerifiedProceduresCount:0",
"-printModel:4",
    // Right now, we let boogie only produce one error per procedure. The boogie wrapper isn't
    // capable of sorting out multiple errors and associating them with models otherwise.
"-errorLimit:1",
];
/// Atomic used to prevent re-initialization of logging.
static LOGGER_CONFIGURED: AtomicBool = AtomicBool::new(false);
/// Atomic used to detect whether we are running in test mode.
static TEST_MODE: AtomicBool = AtomicBool::new(false);
/// Default for what functions to verify.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum VerificationScope {
/// Verify only public functions.
Public,
/// Verify all functions.
All,
/// Verify no functions
None,
}
impl Default for VerificationScope {
fn default() -> Self {
Self::Public
}
}
/// Represents options provided to the tool. Most of those options are configured via a toml
/// source; some over the command line flags.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct Options {
/// Path to the boogie prelude. The special string `INLINE_PRELUDE` is used to refer to
/// a prelude build into this binary.
pub prelude_path: String,
/// The path to the boogie output which represents the verification problem.
pub output_path: String,
/// Verbosity level for logging.
pub verbosity_level: LevelFilter,
/// Whether to run the documentation generator instead of the prover.
pub run_docgen: bool,
/// An account address to use if none is specified in the source.
pub account_address: String,
/// The paths to the Move sources.
pub move_sources: Vec<String>,
/// The paths to any dependencies for the Move sources. Those will not be verified but
/// can be used by `move_sources`.
pub move_deps: Vec<String>,
/// Options for the prover.
pub prover: ProverOptions,
/// Options for the prover backend.
pub backend: BackendOptions,
/// Options for the documentation generator.
pub docgen: DocgenOptions,
}
impl Default for Options {
fn default() -> Self {
Self {
prelude_path: INLINE_PRELUDE.to_string(),
output_path: "output.bpl".to_string(),
run_docgen: false,
account_address: "0x234567".to_string(),
verbosity_level: LevelFilter::Info,
move_sources: vec![],
move_deps: vec![],
docgen: DocgenOptions::default(),
prover: ProverOptions::default(),
backend: BackendOptions::default(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProverOptions {
/// Whether to only generate backend code.
pub generate_only: bool,
/// Whether to generate stubs for native functions.
pub native_stubs: bool,
/// Whether to minimize execution traces in errors.
pub minimize_execution_trace: bool,
/// Whether to omit debug information in generated model.
pub omit_model_debug: bool,
/// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test
/// output.
pub stable_test_output: bool,
/// Scope of what functions to verify.
pub verify_scope: VerificationScope,
/// Whether to emit global axiom that resources are well-formed.
pub resource_wellformed_axiom: bool,
/// Whether to automatically debug trace values of specification expression leafs.
pub debug_trace: bool,
}
impl Default for ProverOptions {
fn default() -> Self {
Self {
generate_only: false,
native_stubs: false,
minimize_execution_trace: true,
omit_model_debug: false,
stable_test_output: false,
verify_scope: VerificationScope::Public,
resource_wellformed_axiom: true,
debug_trace: false,
}
}
}
/// Backend options.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct BackendOptions {
/// Path to the boogie executable.
pub boogie_exe: String,
/// Path to the z3 executable.
pub z3_exe: String,
/// Whether to use cvc4.
pub use_cvc4: bool,
/// Path to the cvc4 executable.
pub cvc4_exe: String,
/// List of flags to pass on to boogie.
pub boogie_flags: Vec<String>,
/// Whether to use native array theory.
pub use_array_theory: bool,
/// Whether to produce an SMT file for each verification problem.
pub generate_smt: bool,
/// Whether native instead of stratified equality should be used.
pub native_equality: bool,
/// A string determining the type of requires used for parameter type checks. Can be
    /// `"requires"` or `"free requires"`.
pub type_requires: String,
/// The depth until which stratified functions are expanded.
pub stratification_depth: usize,
/// A string to be used to inline a function of medium size. Can be empty or `{:inline}`.
pub aggressive_func_inline: String,
/// A string to be used to inline a function of small size. Can be empty or `{:inline}`.
pub func_inline: String,
/// A bound to apply to the length of serialization results.
pub serialize_bound: usize,
/// How many times to call the prover backend for the verification problem. This is used for
/// benchmarking.
pub bench_repeat: usize,
}
impl Default for BackendOptions {
fn default() -> Self {
let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new());
Self {
bench_repeat: 1,
boogie_exe: get_env("BOOGIE_EXE"),
z3_exe: get_env("Z3_EXE"),
use_cvc4: false,
cvc4_exe: get_env("CVC4_EXE"),
boogie_flags: vec![],
use_array_theory: false,
generate_smt: false,
native_equality: false,
type_requires: "free requires".to_owned(),
stratification_depth: 4,
aggressive_func_inline: "".to_owned(),
func_inline: "{:inline}".to_owned(),
serialize_bound: 4,
}
}
}
impl Options {
/// Creates options from toml configuration source.
pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> {
Ok(toml::from_str(toml_source)?)
}
/// Creates options from toml configuration file.
pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> {
Self::create_from_toml(&std::fs::read_to_string(toml_file)?)
}
// Creates options from command line arguments. This parses the arguments and terminates
// the program on errors, printing usage information. The first argument is expected to be
// the program name.
pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> {
// Clap definition of the command line interface.
let is_number = |s: String| {
s.parse::<usize>()
.map(|_| ())
.map_err(|_| "expected number".to_string())
};
let cli = App::new("mvp")
.version("0.1.0")
.about("The Move Prover")
.author("The Libra Core Contributors")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.takes_value(true)
.value_name("TOML_FILE")
.env("MOVE_PROVER_CONFIG")
.help("path to a configuration file. \
Values in this file will be overridden by command line flags"),
)
.arg(
Arg::with_name("config-str")
.conflicts_with("config")
.short("C")
.long("config-str")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.value_name("TOML_STRING")
.help("inline configuration string in toml syntax. Can be repeated. \
Use as in `-C=prover.opt=value -C=backend.opt=value`"),
)
.arg(
Arg::with_name("print-config")
.long("print-config")
.help("prints the effective toml configuration, then exits")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.takes_value(true)
.value_name("BOOGIE_FILE")
.help("path to the boogie output which represents the verification problem"),
)
.arg(
Arg::with_name("verbosity")
.short("v")
.long("verbose")
.takes_value(true)
.possible_values(&["error", "warn", "info", "debug"])
.help("verbosity level."),
)
.arg(
Arg::with_name("generate-only")
.short("g")
.long("generate-only")
.help("only generate boogie file but do not call boogie"),
)
.arg(
Arg::with_name("trace")
.long("trace")
.short("t")
.help("enables automatic tracing of expressions in prover errors")
)
.arg(
Arg::with_name("docgen")
.long("docgen")
.help("run the documentation generator instead of the prover. \
Generated docs will be written into the directory at `--output=<path>`"),
)
.arg(
Arg::with_name("verify")
.long("verify")
.takes_value(true)
.possible_values(&["public", "all", "none"])
.value_name("SCOPE")
.help("default scope of verification \
(can be overridden by `pragma verify=true|false`)"),
)
.arg(
Arg::with_name("bench-repeat")
.long("bench-repeat")
.takes_value(true)
.value_name("COUNT")
.validator(is_number)
.help(
"for benchmarking: how many times to call the backend on the verification problem",
),
)
.arg(
Arg::with_name("dependencies")
.long("dependency")
.short("d")
.multiple(true)
.number_of_values(1)
.takes_value(true)
.value_name("PATH_TO_DEPENDENCY")
.help("path to a Move file, or a directory which will be searched for \
Move files, containing dependencies which will not be verified")
)
.arg(
Arg::with_name("sources")
.multiple(true)
.value_name("PATH_TO_SOURCE_FILE")
.min_values(1)
.help("the source files to verify"),
)
.after_help("More options available via `--config file` or `--config-str str`. \
Use `--print-config` to see format and current values. \
See `move-prover/src/cli.rs::Option` for documentation.");
// Parse the arguments. This will abort the program on parsing errors and print help.
// It will also accept options like --help.
let matches = cli.get_matches_from(args);
// Initialize options.
let get_vec = |s: &str| -> Vec<String> {
match matches.values_of(s) {
Some(vs) => vs.map(|v| v.to_string()).collect(),
_ => vec![],
}
};
let mut options = if matches.is_present("config") {
Self::create_from_toml_file(matches.value_of("config").unwrap())?
} else if matches.is_present("config-str") {
let config_lines = get_vec("config-str").join("\n");
Self::create_from_toml(&config_lines)?
} else {
Options::default()
};
// Analyze arguments.
if matches.is_present("output") {
options.output_path = matches.value_of("output").unwrap().to_string();
}
if matches.is_present("verbosity") {
options.verbosity_level = match matches.value_of("verbosity").unwrap() {
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"info" => LevelFilter::Info,
"debug" => LevelFilter::Debug,
_ => unreachable!("should not happen"),
}
}
if matches.occurrences_of("sources") > 0 {
options.move_sources = get_vec("sources");
}
if matches.occurrences_of("dependencies") > 0 {
options.move_deps = get_vec("dependencies");
}
if matches.is_present("verify") {
options.prover.verify_scope = match matches.value_of("verify").unwrap() {
"public" => VerificationScope::Public,
"all" => VerificationScope::All,
"none" => VerificationScope::None,
_ => unreachable!("should not happen"),
}
}
if matches.is_present("bench-repeat") {
options.backend.bench_repeat =
matches.value_of("bench-repeat").unwrap().parse::<usize>()?;
}
if matches.is_present("docgen") {
options.run_docgen = true;
}
if matches.is_present("trace") {
options.prover.debug_trace = true;
}
if matches.is_present("print-config") {
println!("{}", toml::to_string(&options).unwrap());
Err(anyhow!("exiting"))
} else {
Ok(options)
}
}
/// Sets up logging based on provided options. This should be called as early as possible
/// and before any use of info!, warn! etc.
pub fn setup_logging(&self) {
CombinedLogger::init(vec![TermLogger::new(
self.verbosity_level,
ConfigBuilder::new()
.set_time_level(LevelFilter::Debug)
.set_level_padding(LevelPadding::Off)
.build(),
TerminalMode::Mixed,
)])
.expect("Unexpected CombinedLogger init failure");
}
pub fn setup_logging_for_test(&self) {
// Loggers are global static, so we have to protect against reinitializing.
if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) {
return;
}
TEST_MODE.store(true, Ordering::Relaxed);
SimpleLogger::init(self.verbosity_level, Config::default())
        .expect("Unexpected SimpleLogger failure");
}
/// Returns command line to call boogie.
pub fn get_boogie_command(&self, boogie_file: &str) -> Vec<String> {
let mut result = vec![self.backend.boogie_exe.clone()];
let mut add = |sl: &[&str]| result.extend(sl.iter().map(|s| (*s).to_string()));
add(DEFAULT_BOOGIE_FLAGS);
if self.backend.use_cvc4 {
add(&[
"-proverOpt:SOLVER=cvc4",
&format!("-proverOpt:PROVER_PATH={}", &self.backend.cvc4_exe),
]);
} else {
add(&[&format!("-proverOpt:PROVER_PATH={}", &self.backend.z3_exe)]);
}
if self.backend.use_array_theory {
add(&["-useArrayTheory"]);
}
add(&["-proverOpt:O:smt.QI.EAGER_THRESHOLD=100"]);
add(&["-proverOpt:O:smt.QI.LAZY_THRESHOLD=100"]);
// TODO: see what we can make out of these flags.
//add(&["-proverOpt:O:smt.QI.PROFILE=true"]);
//add(&["-proverOpt:O:trace=true"]);
//add(&["-proverOpt:VERBOSITY=3"]);
//add(&["-proverOpt:C:-st"]);
if self.backend.generate_smt {
add(&["-proverLog:@[email protected]"]);
}
for f in &self.backend.boogie_flags {
add(&[f.as_str()]);
}
add(&[boogie_file]);
result
}
/// Returns name of file where to log boogie output.
    pub fn get_boogie_log_file(&self, boogie_file: &str) -> String {
        format!("{}.log", boogie_file)
    }
}
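// Example wiring (a sketch): parse options from the process arguments, then
// initialize logging before anything emits log records.
//
//     let args: Vec<String> = std::env::args().collect();
//     let options = Options::create_from_args(&args)?;
//     options.setup_logging();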
Multi_Image_Classification.py
# Primary Python Files for Image Classification
import numpy as np
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # don't show any TensorFlow warning messages
import cv2
# Keras libraries used for making the model and tensorflow
import tensorflow, keras
from tensorflow.keras.utils import to_categorical
from keras.layers import Dense,Conv2D,Flatten,MaxPool2D,Dropout
from keras.models import Sequential
# Sklearn library for splitting the data precisely
from sklearn.model_selection import train_test_split
'''
Multi_Image_Classification Class
Description:
1. Identify different sets of images based on the labels you provide.
2. Works based off a sequential model.
3. Uses a Convolutional Neural Network.
'''
class Multi_Image_Classification:
# ------------------------------ Generic Fields Needed for Training ---------------------------------- #
shape = (200,200) # predefine a established shape for training and resizing the images (default)
labels = [] # define the labels to train on
# --------------------------- Training Tools ---------------------------------- #
train_path = './Multi_Classification/train' # define the path where the training images are located
train_labels = None # define the labels (same as testing)
train_images = None # define the images with the training
x_train = None # split the training images for training
y_train = None # split the training labels for training
# ------------------------- Testing Tools -------------------------------------- #
test_path = './Multi_Classification/test' # define the path where the testing images are located
x_val = None # split the training images for testing
y_val = None # split the training labels for testing
test_labels = None # define the testing labels (same as training)
test_images = None # define the testing images
# ----------------------------------- Main Model Tools ------------------------------- #
epoch = 50 # default epoch
batch_size = 10 # default batch size
model = None # define the model (Sequential for Image Classification)
# ------------------------- Define the Functions for Making the model ---------------------- #
# define the labels and images depending on the directory path
def set_data(self, directory_path):
data_labels = [] # define the set of labels according to the name of the file
data_images = [] # define the images
# iterate through all the images in the directory
for filename in os.listdir(directory_path):
# Get the values of the images at the directory path
img = cv2.imread(os.path.join(directory_path, filename))
            # Splitting file names and storing the labels for each image in a list
data_labels.append(filename.split('_')[0])
# Resize all images to a specific shape
img = cv2.resize(img, self.shape)
data_images.append(img) # append the image
data_labels = pd.get_dummies(data_labels).values # Get the categorical data
data_images = np.array(data_images) # Define the image array as a np array for fitting
return data_labels, data_images # return the labels, images for the specific directory
# define the tools for utilzing on creation of the object
def __init__(self, create_model, labels, shape, epoch, batch_size):
np.random.seed(1) # sets the random seed of the NumPy pseudo-random number generator
self.shape = shape # let the user enter the shape of the images to be formed (default 200x200)
# let the user define the labels for their model they want to create
self.labels = labels # default values
# define the training images and labels
self.train_labels, self.train_images = self.set_data(self.train_path)
# Splitting Training data into train and validation dataset
self.x_train,self.x_val,self.y_train,self.y_val = train_test_split(self.train_images,self.train_labels,random_state=1)
# define the test labels and images
self.test_labels, self.test_images = self.set_data(self.test_path)
        # define the model for prediction
if create_model == True:
self.model = self.create_model(epoch, batch_size, self.x_train, self.y_train, self.x_val, self.y_val)
    # create the model to be used for prediction
def create_model(self, epoch, batch_size, x_train, y_train, x_val, y_val):
model = Sequential() # define the model as sequential
model.add(Conv2D(kernel_size=(3,3), filters=32, activation='tanh', input_shape=(200,200,3,))) # define the first layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the second layer
model.add(MaxPool2D(2,2)) # define the third layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the fourth layer
model.add(MaxPool2D(2,2)) # define the fifth layer
model.add(Conv2D(filters=30,kernel_size = (3,3),activation='tanh')) # define the sixth layer
model.add(Flatten()) # define the seventh layer
        model.add(Dense(20,activation='relu')) # define the eighth layer
model.add(Dense(15,activation='relu')) # define the ninth layer
model.add(Dense(len(self.labels),activation = 'softmax')) # define the tenth layer (according to the number of labels for the model)
model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam') # compile the models with categorical because we are working with multiple labels
history = model.fit(x_train,y_train,epochs=epoch,batch_size=batch_size,validation_data=(x_val,y_val)) # train the model
# after the training is done, define a dictionary that holds the model and history from the training
complete_model = {} # define the dictionary
complete_model['model'] = model # define the model with its key
complete_model['history'] = history # define the history with its key
complete_model['labels'] = self.labels # save the labels into the dictionary
return complete_model # return the model at the end
# function to save the model that was created in the create_model function
def save_model(self, model_name, model):
model.save('./Models/{}.h5'.format(model_name)) # save the model in the models directory
# function to save the model's labels to be used later
def save_labels(self, labels, model_name):
f = open('./Models/{}_Labels.txt'.format(model_name), 'a') # create the .txt file that will contain the labels of the model
# iterate through the labels when the model was first created
for i in range(len(labels)):
f.write("{}\n".format(labels[i])) # write the labels to the file
f.close() # after iterating through all the labels, close the file so the space can be free
# ------------------------------------------------------ Define the functions used for classifiying --------------------------------------------- #
# classifies images based on the model and the selected image
    def classify_image(self, image, model):
        checkImage = image[0] # get the image
        checklabel = image[0] # get the label of the image
        predict = model.predict(np.array(checkImage)) # get the prediction
        predicted_label = self.labels[np.argmax(predict)] # get the predicted label
        return predicted_label # return the predicted label from the labels provided by the user
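# Example usage (a sketch; the label names, shape, and file name are assumptions):
#
#   labels = ['cat', 'dog']
#   clf = Multi_Image_Classification(create_model=True, labels=labels,
#                                    shape=(200, 200), epoch=50, batch_size=10)
#   clf.save_model('pets', clf.model['model'])
#   clf.save_labels(labels, 'pets')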
addon.go
package create
import (
"fmt"
awseks "github.com/aws/aws-sdk-go/service/eks"
"github.com/kris-nova/logger"
"github.com/weaveworks/eksctl/pkg/actions/addon"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
)
func createAddonCmd(cmd *cmdutils.Cmd) {
cmd.ClusterConfig = api.NewClusterConfig()
cmd.SetDescription(
"addon",
"Create an Addon",
"",
)
var force, wait bool
cmd.ClusterConfig.Addons = []*api.Addon{{}}
cmd.FlagSetGroup.InFlagSet("Addon", func(fs *pflag.FlagSet) {
fs.StringVar(&cmd.ClusterConfig.Addons[0].Name, "name", "", "Add-on name")
fs.StringVar(&cmd.ClusterConfig.Addons[0].Version, "version", "", "Add-on version")
fs.StringVar(&cmd.ClusterConfig.Addons[0].ServiceAccountRoleARN, "service-account-role-arn", "", "Add-on serviceAccountRoleARN")
fs.BoolVar(&force, "force", false, "Force applies the add-on to overwrite an existing add-on")
fs.BoolVar(&wait, "wait", false, "Wait for the addon creation to complete")
fs.StringSliceVar(&cmd.ClusterConfig.Addons[0].AttachPolicyARNs, "attach-policy-arn", []string{}, "ARN of the policies to attach")
})
cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) {
cmdutils.AddClusterFlag(fs, cmd.ClusterConfig.Metadata)
cmdutils.AddRegionFlag(fs, &cmd.ProviderConfig)
cmdutils.AddConfigFileFlag(fs, &cmd.ClusterConfigFile)
cmdutils.AddTimeoutFlag(fs, &cmd.ProviderConfig.WaitTimeout)
})
cmdutils.AddCommonFlagsForAWS(cmd.FlagSetGroup, &cmd.ProviderConfig, false)
cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
cmd.NameArg = cmdutils.GetNameArg(args)
if err := cmdutils.NewCreateOrUpgradeAddonLoader(cmd).Load(); err != nil {
return err
}
clusterProvider, err := cmd.NewCtl()
if err != nil {
return err
}
oidc, err := clusterProvider.NewOpenIDConnectManager(cmd.ClusterConfig)
if err != nil {
return err
}
		oidcProviderExists, err := oidc.CheckProviderExists()
		if err != nil {
			return err
		}
if !oidcProviderExists {
logger.Warning("no IAM OIDC provider associated with cluster, try 'eksctl utils associate-iam-oidc-provider --region=%s --cluster=%s'", cmd.ClusterConfig.Metadata.Region, cmd.ClusterConfig.Metadata.Name)
}
stackManager := clusterProvider.NewStackManager(cmd.ClusterConfig)
output, err := clusterProvider.Provider.EKS().DescribeCluster(&awseks.DescribeClusterInput{
Name: &cmd.ClusterConfig.Metadata.Name,
})
if err != nil {
return fmt.Errorf("failed to fetch cluster %q version: %v", cmd.ClusterConfig.Metadata.Name, err)
}
logger.Info("Kubernetes version %q in use by cluster %q", *output.Cluster.Version, cmd.ClusterConfig.Metadata.Name)
cmd.ClusterConfig.Metadata.Version = *output.Cluster.Version
clientSet, err := clusterProvider.NewStdClientSet(cmd.ClusterConfig)
if err != nil {
return err
}
addonManager, err := addon.New(cmd.ClusterConfig, clusterProvider.Provider.EKS(), stackManager, oidcProviderExists, oidc, clientSet)
if err != nil {
return err
}
for _, a := range cmd.ClusterConfig.Addons {
			if force { // force is specified at the command-line level
a.Force = true
}
err := addonManager.Create(a, wait)
if err != nil {
return err
}
}
return nil
}
}
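// Example invocation (illustrative; cluster and add-on names are assumptions,
// the flags themselves are the ones registered above):
//
//	eksctl create addon --cluster my-cluster --name vpc-cni --version 1.7.5 --force --wait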
time.go
package msgpack
import (
"encoding/binary"
"fmt"
"reflect"
"time"
"github.com/vmihailenco/msgpack/codes"
)
var timeExtId int8 = -1
func init() {
timeType := reflect.TypeOf((*time.Time)(nil)).Elem()
registerExt(timeExtId, timeType, encodeTimeValue, decodeTimeValue)
}
func (e *Encoder) EncodeTime(tm time.Time) error {
b := e.encodeTime(tm)
if err := e.encodeExtLen(len(b)); err != nil {
return err
}
if err := e.w.WriteByte(byte(timeExtId)); err != nil {
return err
}
return e.write(b)
}
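// encodeTime packs tm using the msgpack timestamp extension format: 4 bytes
// when the value is a plain 32-bit count of seconds, 8 bytes when
// nanoseconds<<34|seconds fits in 64 bits (timestamp 64), and 12 bytes
// (timestamp 96) otherwise.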
func (e *Encoder) encodeTime(tm time.Time) []byte {
secs := uint64(tm.Unix())
if secs>>34 == 0 {
data := uint64(tm.Nanosecond())<<34 | secs
if data&0xffffffff00000000 == 0 {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, uint32(data))
return b
} else {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, data)
return b
}
}
b := make([]byte, 12)
binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond()))
binary.BigEndian.PutUint64(b[4:], uint64(secs))
return b
}
func (d *Decoder) DecodeTime() (time.Time, error) {
tm, err := d.decodeTime()
if err != nil {
return tm, err
}
if tm.IsZero() {
// Assume that zero time does not have timezone information.
return tm.UTC(), nil
}
return tm, nil
}
func (d *Decoder) decodeTime() (time.Time, error) {
extLen := d.extLen
d.extLen = 0
if extLen == 0 {
c, err := d.readCode()
if err != nil {
return time.Time{}, err
}
// Legacy format.
if c == codes.FixedArrayLow|2 {
sec, err := d.DecodeInt64()
if err != nil {
return time.Time{}, err
}
nsec, err := d.DecodeInt64()
if err != nil {
return time.Time{}, err
}
return time.Unix(sec, nsec), nil
}
if codes.IsString(c) {
s, err := d.string(c)
if err != nil {
return time.Time{}, err
}
return time.Parse(time.RFC3339Nano, s)
}
extLen, err = d.parseExtLen(c)
if err != nil {
return time.Time{}, err
}
// Skip ext id.
_, err = d.s.ReadByte()
if err != nil {
			return time.Time{}, err
}
}
b, err := d.readN(extLen)
if err != nil {
return time.Time{}, err
}
switch len(b) {
case 4:
sec := binary.BigEndian.Uint32(b)
return time.Unix(int64(sec), 0), nil
case 8:
sec := binary.BigEndian.Uint64(b)
nsec := int64(sec >> 34)
sec &= 0x00000003ffffffff
return time.Unix(int64(sec), nsec), nil
case 12:
nsec := binary.BigEndian.Uint32(b)
sec := binary.BigEndian.Uint64(b[4:])
return time.Unix(int64(sec), int64(nsec)), nil
default:
err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen)
return time.Time{}, err
}
}
func encodeTimeValue(e *Encoder, v reflect.Value) error {
tm := v.Interface().(time.Time)
b := e.encodeTime(tm)
return e.write(b)
}
func decodeTimeValue(d *Decoder, v reflect.Value) error {
tm, err := d.DecodeTime()
if err != nil {
return err
}
v.Set(reflect.ValueOf(tm))
return nil
}
Pug.ts
import { ensureDirSync, writeFileSync } from "fs-extra";
import { sync as globSync } from "glob";
import { join, parse, resolve } from "path";
import { Options, renderFile } from "pug";
import AbstractBundler from "./AbstractBundler";
export type PugOptions = Options & {
  inputdir: string;
  outputdir: string;
};
export default class Pug extends AbstractBundler {
private inputdir: string;
private outputdir: string;
private config: PugOptions;
constructor(config: PugOptions, watch: boolean, autobundle?: boolean) {
super();
this.inputdir = config.inputdir;
this.outputdir = config.outputdir;
delete config.inputdir;
delete config.outputdir;
this.config = config;
if (watch) {
this.initWatcher(join(this.inputdir, "/*.pug"), autobundle);
}
}
public async bundle(path?: string): Promise<void> {
if (!path) {
let paths = globSync(join(this.inputdir, "/*.pug"));
for (let p of paths) await this.bundle(p);
return;
}
let path_obj = parse(path),
result = renderFile(path, this.config);
let outputdir = resolve(this.outputdir, path_obj.base == "index.pug" ? "" : path_obj.name),
output = join(outputdir, "/index.html");
ensureDirSync(outputdir);
writeFileSync(output, result);
}
}
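// Example usage (a sketch; the directory names are assumptions):
//
//   const pug = new Pug({ inputdir: "src/views", outputdir: "dist" }, false);
//   pug.bundle(); // renders every src/views/*.pug into dist/<page>/index.html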
request_test.ts
// Copyright 2018-2019 the oak authors. All rights reserved. MIT license.
import {
test,
assertEquals,
assertStrictEq,
assertThrowsAsync
} from "./test_deps.ts";
import { ServerRequest } from "./deps.ts";
import httpErrors from "./httpError.ts";
import { Request, BodyType } from "./request.ts";
const encoder = new TextEncoder();
function createMockBodyReader(body: string): Deno.Reader {
const buf = encoder.encode(body);
let offset = 0;
return {
async read(p: Uint8Array): Promise<number | Deno.EOF> {
if (offset >= buf.length) {
return Deno.EOF;
}
const chunkSize = Math.min(p.length, buf.length - offset);
      p.set(buf.subarray(offset, offset + chunkSize));
offset += chunkSize;
return chunkSize;
}
};
}
function createMockServerRequest(
url = "/",
body = "",
headerValues: { [header: string]: string } = {}
): ServerRequest {
const headers = new Headers();
for (const [key, value] of Object.entries(headerValues)) {
headers.set(key, value);
}
if (body.length && !headers.has("content-length")) {
headers.set("content-length", String(body.length));
}
return {
headers,
method: "GET",
url,
body: createMockBodyReader(body),
async respond() {}
} as any;
}
test(function requestSearch() {
const request = new Request(createMockServerRequest("/foo?bar=baz&qat=qux"));
assertEquals(request.path, "/foo");
assertEquals(request.search, "?bar=baz&qat=qux");
assertEquals(request.method, "GET");
assertEquals(Array.from(request.searchParams.entries()), [
["bar", "baz"],
["qat", "qux"]
]);
});
test(function serverRequestAvail() {
const mockServerRequest = createMockServerRequest();
const request = new Request(mockServerRequest);
assertStrictEq(request.serverRequest, mockServerRequest);
});
test(function requestAcceptEncoding() {
const request = new Request(
createMockServerRequest("/", "", {
"Accept-Encoding": "gzip, compress;q=0.2, identity;q=0.5"
})
);
assertEquals(request.acceptsEncodings("gzip", "identity"), "gzip");
});

test(function requestAccepts() {
  const request = new Request(
    createMockServerRequest("/", "", {
      Accept: "application/json;q=0.2, text/html"
})
);
assertEquals(request.accepts("application/json", "text/html"), "text/html");
});
test(function requestAcceptsNoProvided() {
const request = new Request(
createMockServerRequest("/", "", {
Accept: "application/json;q=0.2, text/html"
})
);
assertEquals(request.accepts(), ["text/html", "application/json"]);
});
test(function requestNoAccepts() {
const request = new Request(createMockServerRequest("/"));
assertEquals(request.accepts("application/json"), undefined);
});
test(function requestNoAcceptsMatch() {
const request = new Request(
createMockServerRequest("/", "", { Accept: "text/html" })
);
assertEquals(request.accepts("application/json"), undefined);
});
test(async function requestBodyJson() {
const request = new Request(
createMockServerRequest("/", `{"foo":"bar"}`, {
"Content-Type": "application/json"
})
);
assertEquals(await request.body(), {
type: BodyType.JSON,
value: { foo: "bar" }
});
});
test(async function requestBodyForm() {
const request = new Request(
createMockServerRequest("/", `foo=bar&bar=1&baz=qux+%2B+quux`, {
"Content-Type": "application/x-www-form-urlencoded"
})
);
const actual = await request.body();
assertEquals(actual!.type, BodyType.Form);
if (actual && actual.type === "form") {
assertEquals(Array.from(actual.value.entries()), [
["foo", "bar"],
["bar", "1"],
["baz", "qux + quux"]
]);
} else {
throw Error("Unexpected response");
}
});
test(async function requestBodyText() {
const request = new Request(
createMockServerRequest("/", "hello world!", {
"Content-Type": "text/plain"
})
);
assertEquals(await request.body(), {
type: BodyType.Text,
value: "hello world!"
});
});
test(async function noBodyResolvesUndefined() {
const request = new Request(createMockServerRequest());
assertEquals(await request.body(), {
type: BodyType.Undefined,
value: undefined
});
});
test(async function unsupportedMediaTypeBody() {
const request = new Request(
createMockServerRequest("/", "blah", {
"Content-Type": "multipart/form-data"
})
);
await assertThrowsAsync(async () => {
await request.body();
}, httpErrors.UnsupportedMediaType);
});
enGB.min.js
/**
* Minified by jsDelivr using Terser v3.14.1.
* Original file: /npm/[email protected]/dist/languages/enGB.js
*
* Do NOT use SRI with dynamically generated files! More information: https://www.jsdelivr.com/using-sri-with-dynamic-files
*/
!function(e,E){if("object"==typeof exports&&"object"==typeof module)module.exports=E(require("hyperformula"));else if("function"==typeof define&&define.amd)define(["hyperformula"],E);else{var O="object"==typeof exports?E(require("hyperformula")):E(e.HyperFormula);for(var N in O)("object"==typeof exports?exports:e)[N]=O[N]}}("undefined"!=typeof self?self:this,function(e){return function(e){var E={};function O(N){if(E[N])return E[N].exports;var A=E[N]={i:N,l:!1,exports:{}};return e[N].call(A.exports,A,A.exports,O),A.l=!0,A.exports}return O.m=e,O.c=E,O.d=function(e,E,N){O.o(e,E)||Object.defineProperty(e,E,{enumerable:!0,get:N})},O.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},O.t=function(e,E){if(1&E&&(e=O(e)),8&E)return e;if(4&E&&"object"==typeof e&&e&&e.__esModule)return e;var N=Object.create(null);if(O.r(N),Object.defineProperty(N,"default",{enumerable:!0,value:e}),2&E&&"string"!=typeof e)for(var A in e)O.d(N,A,function(E){return e[E]}.bind(null,A));return N},O.n=function(e){var E=e&&e.__esModule?function(){return e.default}:function(){return e};return O.d(E,"a",E),E},O.o=function(e,E){return Object.prototype.hasOwnProperty.call(e,E)},O.p="",O(O.s=4)}([,function(E,O){E.exports=e},,,function(e,E,O){"use strict";E.__esModule=!0,E.default=void 0;var N,A=(N=O(1))&&N.__esModule?N:{default:N};var I={errors:{CYCLE:"#CYCLE!",DIV_BY_ZERO:"#DIV/0!",ERROR:"#ERROR!",NA:"#N/A",NAME:"#NAME?",NUM:"#NUM!",REF:"#REF!",VALUE:"#VALUE!"},functions:{ABS:"ABS",ACOS:"ACOS",AND:"AND",ASIN:"ASIN",ATAN:"ATAN",ATAN2:"ATAN2",AVERAGE:"AVERAGE",AVERAGEA:"AVERAGEA",AVERAGEIF:"AVERAGEIF",BASE:"BASE",BIN2DEC:"BIN2DEC",BIN2HEX:"BIN2HEX",BIN2OCT:"BIN2OCT",BITAND:"BITAND",BITLSHIFT:"BITLSHIFT",BITOR:"BITOR",BITRSHIFT:"BITRSHIFT",BITXOR:"BITXOR",CEILING:"CEILING",CHAR:"CHAR",CHOOSE:"CHOOSE",CODE:"CODE",COLUMNS:"COLUMNS",CONCATENATE:"CONCATENATE",CORREL:"CORREL",COS:"COS",COT:"COT",COUNT:"COUNT",COUNTA:"COUNTA",COUNTBLANK:"COUNTBLANK",COUNTIF:"COUNTIF",COUNTIFS:"COUNTIFS",COUNTUNIQUE:"COUNTUNIQUE",DATE:"DATE",DAY:"DAY",DAYS:"DAYS",DEC2BIN:"DEC2BIN",DEC2HEX:"DEC2HEX",DEC2OCT:"DEC2OCT",DECIMAL:"DECIMAL",DEGREES:"DEGREES",DELTA:"DELTA",E:"E",EOMONTH:"EOMONTH",ERF:"ERF",ERFC:"ERFC",EVEN:"EVEN",EXP:"EXP",FALSE:"FALSE",IF:"IF",IFERROR:"IFERROR",IFNA:"IFNA",INDEX:"INDEX",INT:"INT",ISBLANK:"ISBLANK",ISERROR:"ISERROR",ISEVEN:"ISEVEN",ISLOGICAL:"ISLOGICAL",ISNONTEXT:"ISNONTEXT",ISNUMBER:"ISNUMBER",ISODD:"ISODD",ISTEXT:"ISTEXT",LN:"LN",LOG:"LOG",LOG10:"LOG10",MATCH:"MATCH",MAX:"MAX",MAXA:"MAXA",MAXPOOL:"MAXPOOL",MEDIAN:"MEDIAN",MEDIANPOOL:"MEDIANPOOL",MIN:"MIN",MINA:"MINA",MMULT:"MMULT",MOD:"MOD",MONTH:"MONTH",NOT:"NOT",ODD:"ODD",OFFSET:"OFFSET",OR:"OR",PI:"PI",POWER:"POWER",RADIANS:"RADIANS",RAND:"RAND",ROUND:"ROUND",ROUNDDOWN:"ROUNDDOWN",ROUNDUP:"ROUNDUP",ROWS:"ROWS",SIN:"SIN",SPLIT:"SPLIT",SQRT:"SQRT",SUM:"SUM",SUMIF:"SUMIF",SUMIFS:"SUMIFS",SUMPRODUCT:"SUMPRODUCT",SUMSQ:"SUMSQ",SWITCH:"SWITCH",TAN:"TAN",TEXT:"TEXT",TRANSPOSE:"TRANSPOSE",TRUE:"TRUE",TRUNC:"TRUNC",VLOOKUP:"VLOOKUP",XOR:"XOR",YEAR:"YEAR"},langCode:"enGB",ui:{NEW_SHEET_PREFIX:"Sheet"}};A.default.languages||(A.default.languages={}),A.default.languages[I.langCode]=I;var T=I;E.default=T}]).___});
//# sourceMappingURL=/sm/f1d72291621a8c8f9ff01dd12efd9ce6f316b458dd97054713b861e28dc7bb8f.map | O |
ndarray.rs
#![cfg(feature = "use_ndarray")]
use std::iter::FromIterator;
use ndarray::Array;
use smallvec::smallvec;
use rand::Rng;
use rand::distributions::Standard;
use n5::prelude::*;
use n5::ndarray::prelude::*;
#[test]
fn test_read_ndarray() {
let dir = tempdir::TempDir::new("rust_n5_ndarray_tests").unwrap();
let n = N5Filesystem::open_or_create(dir.path())
.expect("Failed to create N5 filesystem");
let block_size = smallvec![3, 4, 2, 1];
let data_attrs = DatasetAttributes::new(
smallvec![3, 300, 200, 100],
block_size.clone(),
i32::VARIANT,
CompressionType::default(),
);
let numel = data_attrs.get_block_num_elements();
let path_name = "test/dataset/group";
n.create_dataset(path_name, &data_attrs)
.expect("Failed to create dataset");
for k in 0..10 {
let z = block_size[3] * k;
for j in 0..10 {
let y = block_size[2] * j;
for i in 0..10 {
let x = block_size[1] * i;
let mut block_data = Vec::<i32>::with_capacity(numel);
for zo in 0..block_size[3] {
for yo in 0..block_size[2] {
for xo in 0..block_size[1] {
block_data.push(1000 + x as i32 + xo as i32);
block_data.push(2000 + y as i32 + yo as i32);
block_data.push(3000 + z as i32 + zo as i32);
}
}
}
let block_in = VecDataBlock::new(
block_size.clone(),
smallvec![0, u64::from(i), u64::from(j), u64::from(k)],
block_data);
n.write_block(path_name, &data_attrs, &block_in)
.expect("Failed to write block");
}
}
}
let bbox = BoundingBox::new(smallvec![0, 5, 4, 3], smallvec![3, 35, 15, 7]);
let a = n.read_ndarray::<i32>(path_name, &data_attrs, &bbox).unwrap();
for z in 0..a.shape()[3] {
for y in 0..a.shape()[2] {
for x in 0..a.shape()[1] {
assert_eq!(a[[0, x, y, z]], 1005 + x as i32, "0 {} {} {}: {}", x, y, z, a[[0, x, y, z]]);
assert_eq!(a[[1, x, y, z]], 2004 + y as i32, "1 {} {} {}: {}", x, y, z, a[[1, x, y, z]]);
assert_eq!(a[[2, x, y, z]], 3003 + z as i32, "2 {} {} {}: {}", x, y, z, a[[2, x, y, z]]);
}
}
}
}
#[test]
fn test_read_ndarray_oob() {
let dir = tempdir::TempDir::new("rust_n5_ndarray_tests").unwrap();
let n = N5Filesystem::open_or_create(dir.path())
.expect("Failed to create N5 filesystem");
let block_size = smallvec![50, 100];
let data_attrs = DatasetAttributes::new(
smallvec![100, 200],
block_size.clone(),
i32::VARIANT,
CompressionType::default(),
);
let path_name = "test/dataset/group";
n.create_dataset(path_name, &data_attrs)
.expect("Failed to create dataset");
let block_in = VecDataBlock::new(
smallvec![1, 1],
smallvec![1, 1],
vec![1]);
n.write_block(path_name, &data_attrs, &block_in)
.expect("Failed to write block");
let bbox = BoundingBox::new(smallvec![45, 175], smallvec![50, 50]);
let a = n.read_ndarray::<i32>(path_name, &data_attrs, &bbox).unwrap();
assert!(a.iter().all(|v| *v == 0));
}
#[test]
fn test_write_read_ndarray() {
let dir = tempdir::TempDir::new("rust_n5_ndarray_tests").unwrap();
let n = N5Filesystem::open_or_create(dir.path())
.expect("Failed to create N5 filesystem");
let block_size = smallvec![3, 4, 2, 1];
let data_attrs = DatasetAttributes::new(
smallvec![3, 300, 200, 100],
block_size.clone(),
i32::VARIANT,
CompressionType::default(),
);
let path_name = "test/dataset/group";
n.create_dataset(path_name, &data_attrs)
.expect("Failed to create dataset");
let rng = rand::thread_rng();
let arr_shape = [3, 35, 15, 7];
let array: Array<i32, _> = Array::from_iter(
rng.sample_iter(&Standard)
.take(arr_shape.iter().product()))
.into_shape(arr_shape.clone()).unwrap()
.into_dyn();
let offset = smallvec![0, 5, 4, 3];
n.write_ndarray(path_name, &data_attrs, offset.clone(), &array, 0).unwrap();
let bbox = BoundingBox::new(offset, arr_shape.iter().map(|s| *s as u64).collect());
let a = n.read_ndarray::<i32>("test/dataset/group", &data_attrs, &bbox).unwrap();
// Also test c-order.
let mut a_c = Array::zeros(bbox.size_ndarray_shape().as_slice());
n.read_ndarray_into::<i32>("test/dataset/group", &data_attrs, &bbox, a_c.view_mut()).unwrap();
assert_eq!(array, a);
assert_eq!(array, a_c);
assert_eq!(a, a_c);
}
add-user-to-profile.component.ts
import { Component, TemplateRef, OnInit, ViewChild, ViewEncapsulation } from '@angular/core';
import { LocalDataSource } from 'ng2-smart-table';
import { NbWindowService } from '@nebular/theme';
import { NbWindowRef } from '@nebular/theme';
import { Router, ActivatedRoute } from '@angular/router';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { ProfileService } from '../../../@core/data/profiles.service';
import { UserService } from '../../../@core/data/users.service';
import { user } from '../../../@core/models/user.model';
declare let $: any;
import * as _ from 'underscore';
import { environment } from '../../../../environments/environment';
import { AuthService } from '../../../@core/data/auth.service';
import { NgxSpinnerService } from 'ngx-spinner';
import { LocalStorageService } from '../../../../app/@core/data/local-storage.service';
@Component({
selector: 'ngx-add-user-to-profile',
templateUrl: './add-user-to-profile.component.html',
providers: [ ProfileService],
})
export class AddUserProfilesComponent implements OnInit{
@ViewChild('contentTemplate') contentTemplate: TemplateRef<any>;
@ViewChild('disabledEsc', { read: TemplateRef }) disabledEscTemplate: TemplateRef<HTMLElement>;
todo:string[];
done:string[];
count:number;
counter:number;
Username:any;
Name:any;
Usernamed:any;
Named:string="";
// pager object
pager: any = {};
classid:string="";
// paged items
pagedItems: any[];
LIST_IDS:string[];
response:any;
private allItems: any[];
apiUrl = environment.apiUrl;
reqHeader: any;
destId:any;
usernam:any;
profile:any;
constructor( private http: HttpClient,
private routers: Router,
private windowService: NbWindowService,
private route: ActivatedRoute,
private _auth_service: AuthService,
private ProfileService :ProfileService,
private spinner: NgxSpinnerService,
private LocalStorageService: LocalStorageService,
) {
this.reqHeader = new HttpHeaders({"Authorization": "Bearer " + this._auth_service.authentication.token});
}
async ngOnInit(){
setTimeout(() => {
this.spinner.show();
});
this.todo=[];
this.done=[];
this.LIST_IDS=[];
this.count=0;
this.counter=0;
this.usernam = this.LocalStorageService.retriveUserAccount();
if(this.usernam.Login !="Administrator"){
await this.http.get(this.apiUrl+`/formytek/public/api/UserProfile/${this.usernam[0].Username}`,{ headers: this.reqHeader })
.toPromise().then(
(response) => {
console.log( response['profile']);
this.profile = response['profile'];
})
if(this.profile['add_user_profile']==1)
{
await this.http.get<any[]>(this.apiUrl+'/formytek/public/api/userProfileList', { headers: this.reqHeader })
.toPromise().then(
(res) => {
res.forEach(element => {
this.todo.push(element)
});
}).catch(
(error) => {
}
);
await this.http.get<any[]>(this.apiUrl+'/formytek/public/api/UserProfile', { headers: this.reqHeader })
.toPromise().then(
(res) => {
this.allItems = res;
console.dir(this.allItems);
res.forEach(element => {
this.count=this.count+1;
console.log("ddd"+this.count)
this.LIST_IDS.push('#id_' + this.count);
this.done.push(element)
});
}).catch(
(error) => {
}
);
$('.rolldown-list li').each(function () {
var delay = ($(this).index() / 4) + 's';
$(this).css({
webkitAnimationDelay: delay,
mozAnimationDelay: delay,
animationDelay: delay
});
});
this.spinner.hide();
}else{
this.routers.navigate(['/pages/dashboard']) ;
}
} else if(this.usernam.Login =="Administrator"){
await this.http.get<any[]>(this.apiUrl+'/formytek/public/api/userProfileList', { headers: this.reqHeader })
.toPromise().then(
(res) => {
res.forEach(element => {
this.todo.push(element)
});
}).catch(
(error) => {
}
);
await this.http.get<any[]>(this.apiUrl+'/formytek/public/api/UserProfile', { headers: this.reqHeader })
.toPromise().then(
(res) => {
this.allItems = res;
res.forEach(element => {
this.count=this.count+1;
this.LIST_IDS.push('#id_' + this.count);
this.done.push(element)
});
}).catch(
(error) => {
}
);
$('.rolldown-list li').each(function () {
var delay = ($(this).index() / 4) + 's';
$(this).css({
webkitAnimationDelay: delay,
mozAnimationDelay: delay,
animationDelay: delay
});
});
this.spinner.hide();
}
  }

  sortable() {
    var self = this;
    $('#sortable1').sortable({connectWith: this.LIST_IDS,
start: function(e, ui) {
// puts the old positions into array before sorting
var old_position = ui.item.index();
},
update: function(event, ui) {
// grabs the new positions now that we've finished sorting
var new_position = ui.item.index();
this.destId = ui.item.parent().attr("id");
console.log("this"+this.destId)
var item=ui.item;
var iditem=ui.item.attr("id");
this.Name=""
this.Username= ""
this.Name=$("#"+this.destId+" li:first-child").text()
this.Username= ui.item.text();
$('#'+this.destId+'> li .crois').css('display','initial');
self.ProfileService.addUserToProfile(this.Username,this.Name).subscribe(
data => {
var x = document.getElementById("snackbar");
x.className = "show";
setTimeout(function(){ x.className = x.className.replace("show", ""); }, 6000);
},
error=>{
var newItem = '<li _ngcontent-c16 class="ui-state-default ng-star-inserted ui-sortable-handle" (click)="sortable($event)" style="padding:0.5em;margin: 0 5px 5px 5px;font-size: 1.2em;width: 180px;max-width: 180px; word-wrap: break-word;background-color: #DCDCDC;"><i class="nb-person"></i> <p style="width: 50%;position: absolute;">'+this.Username+'</p> <i class="ion-close-round" style="margin-left:70%;position: relative;width: 50px" (click)="deleteuser($event,item)"></i></li>'
$("#sortable1").append(newItem)
if(error['error'].text=='Success')
{
var x = document.getElementById("snackbar");
x.className = "show";
setTimeout(function(){ x.className = x.className.replace("show", ""); }, 6000);
}else{
$('#'+this.destId+' > li:contains('+this.Username+')').closest("li").remove();
var x = document.getElementById("snackbar4");
x.className = "show";
setTimeout(function(){ x.className = x.className.replace("show", ""); }, 3000);
}
})
}
})
$(".sortable2").sortable();
}
deleteuser(event, item) {
this.windowService.open(
this.disabledEscTemplate,
{
title: 'Delete user',
hasBackdrop: false,
closeOnEsc: true,
},
);
$(".cdk-overlay-container").css('display','initial');
this.classid =event.currentTarget.classList[0]
this.Usernamed=item;
this.Named=$("#"+this.classid+" li:first-child").text()
}
deleteUser() {
var user;
if (this.Usernamed.Username) {
user = this.Usernamed.Username;
} else {
user = this.Usernamed;
}
this.ProfileService.delteUserFromProfile(user).subscribe(
data => {
},
error=>{
if(error['error'].text=='Success')
{
$('#'+this.classid+' li:contains('+this.Usernamed.Username+')').remove();
$('li:contains('+this.Usernamed+')').remove();
var x = document.getElementById("snackbar3");
x.className = "show";
setTimeout(function(){ x.className = x.className.replace("show", ""); }, 3000);
var newItem = `<li _ngcontent-c16 class="ui-state-default ng-star-inserted ui-sortable-handle" (click)="sortable($event)" style="padding:0.5em;margin: 0 5px 5px 5px;font-size: 1.2em;width: 180px;max-width: 180px; word-wrap: break-word;background-color: #DCDCDC;"><i class="nb-person"></i>`+user+` <i class="ion-close-round crois" style="margin-left:70%;position: relative;width: 50px;display:none" (click)="deleteuser(Event,user)"></i></li>`
$("#sortable1").append(newItem)
$("#sortable1").sortable();
$(".sortable2").sortable();
$( 'li .crois').css('display','none');
}else{
var x = document.getElementById("snackbar2");
x.className = "show";
setTimeout(function(){ x.className = x.className.replace("show", ""); }, 3000);
}
})
$(".cdk-overlay-container").css('display','none');
}
}
docstring.go
// Code generated by gen/docstring.sh DO NOT EDIT.
package main
var genDocString = `
lf is a terminal file manager.
Source code can be found in the repository at
https://github.com/gokcehan/lf.
This documentation can either be read from terminal using 'lf -doc' or
online at https://godoc.org/github.com/gokcehan/lf. You can also use 'doc'
command (default '<f-1>') inside lf to view the documentation in a pager.
You can run 'lf -help' to see descriptions of command line options.
Quick Reference
The following commands are provided by lf:
quit (default 'q')
up (default 'k' and '<up>')
half-up (default '<c-u>')
page-up (default '<c-b>' and '<pgup>')
down (default 'j' and '<down>')
half-down (default '<c-d>')
page-down (default '<c-f>' and '<pgdn>')
updir (default 'h' and '<left>')
open (default 'l' and '<right>')
top (default 'gg' and '<home>')
bottom (default 'G' and '<end>')
toggle
invert (default 'v')
unselect (default 'u')
glob-select
glob-unselect
copy (default 'y')
cut (default 'd')
paste (default 'p')
clear (default 'c')
sync
draw
redraw (default '<c-l>')
load
reload (default '<c-r>')
echo
echomsg
echoerr
cd
select
delete (modal)
rename (modal) (default 'r')
source
push
read (modal) (default ':')
shell (modal) (default '$')
shell-pipe (modal) (default '%')
shell-wait (modal) (default '!')
shell-async (modal) (default '&')
find (modal) (default 'f')
find-back (modal) (default 'F')
find-next (default ';')
find-prev (default ',')
search (modal) (default '/')
search-back (modal) (default '?')
search-next (default 'n')
search-prev (default 'N')
filter (modal)
setfilter
mark-save (modal) (default 'm')
mark-load (modal) (default "'")
mark-remove (modal) (default '"')
The following command line commands are provided by lf:
cmd-escape (default '<esc>')
cmd-complete (default '<tab>')
cmd-menu-complete
cmd-menu-complete-back
cmd-enter (default '<c-j>' and '<enter>')
cmd-interrupt (default '<c-c>')
cmd-history-next (default '<c-n>')
cmd-history-prev (default '<c-p>')
cmd-left (default '<c-b>' and '<left>')
cmd-right (default '<c-f>' and '<right>')
cmd-home (default '<c-a>' and '<home>')
cmd-end (default '<c-e>' and '<end>')
cmd-delete (default '<c-d>' and '<delete>')
cmd-delete-back (default '<backspace>' and '<backspace2>')
cmd-delete-home (default '<c-u>')
cmd-delete-end (default '<c-k>')
cmd-delete-unix-word (default '<c-w>')
cmd-yank (default '<c-y>')
cmd-transpose (default '<c-t>')
cmd-transpose-word (default '<a-t>')
cmd-word (default '<a-f>')
cmd-word-back (default '<a-b>')
cmd-delete-word (default '<a-d>')
cmd-capitalize-word (default '<a-c>')
cmd-uppercase-word (default '<a-u>')
cmd-lowercase-word (default '<a-l>')
The following options can be used to customize the behavior of lf:
anchorfind bool (default on)
autoquit bool (default off)
dircache bool (default on)
dircounts bool (default off)
dirfirst bool (default on)
dironly bool (default off)
drawbox bool (default off)
errorfmt string (default "\033[7;31;47m%s\033[0m")
filesep string (default "\n")
findlen int (default 1)
globsearch bool (default off)
hidden bool (default off)
hiddenfiles []string (default '.*')
icons bool (default off)
ifs string (default '')
ignorecase bool (default on)
ignoredia bool (default on)
incfilter bool (default off)
incsearch bool (default off)
info []string (default '')
mouse bool (default off)
number bool (default off)
period int (default 0)
preview bool (default on)
previewer string (default '')
cleaner string (default '')
promptfmt string (default "\033[32;1m%u@%h\033[0m:\033[34;1m%d\033[0m\033[1m%f\033[0m")
ratios []int (default '1:2:3')
relativenumber bool (default off)
reverse bool (default off)
scrolloff int (default 0)
shell string (default 'sh' for unix and 'cmd' for windows)
shellflag string (default '-c' for unix and '/c' for windows)
shellopts []string (default '')
smartcase bool (default on)
smartdia bool (default off)
sortby string (default 'natural')
tabstop int (default 8)
timefmt string (default 'Mon Jan _2 15:04:05 2006')
timefmtthisy string (default 'Jan _2 15:04')
timefmtothery string (default 'Jan _2 2006')
truncatechar string (default '~')
waitmsg string (default 'Press any key to continue')
wrapscan bool (default on)
wrapscroll bool (default off)
The following environment variables are exported for shell commands:
f
fs
fx
id
PWD
OLDPWD
LF_LEVEL
OPENER
EDITOR
PAGER
SHELL
The following commands/keybindings are provided by default:
unix windows
cmd open &$OPENER "$f" cmd open &%OPENER% %f%
map e $$EDITOR "$f" map e $%EDITOR% %f%
map i $$PAGER "$f" map i !%PAGER% %f%
map w $$SHELL map w $%SHELL%
The following additional keybindings are provided by default:
map zh set hidden!
map zr set reverse!
map zn set info
map zs set info size
map zt set info time
map za set info size:time
map sn :set sortby natural; set info
map ss :set sortby size; set info size
map st :set sortby time; set info time
map sa :set sortby atime; set info atime
map sc :set sortby ctime; set info ctime
map se :set sortby ext; set info
map gh cd ~
map <space> :toggle; down
Configuration
Configuration files should be located at:
os system-wide user-specific
unix /etc/lf/lfrc ~/.config/lf/lfrc
windows C:\ProgramData\lf\lfrc C:\Users\<user>\AppData\Local\lf\lfrc
Selection file should be located at:
unix ~/.local/share/lf/files
windows C:\Users\<user>\AppData\Local\lf\files
Marks file should be located at:
unix ~/.local/share/lf/marks
windows C:\Users\<user>\AppData\Local\lf\marks
History file should be located at:
unix ~/.local/share/lf/history
windows C:\Users\<user>\AppData\Local\lf\history
You can configure the default values of following variables to change these
locations:
$XDG_CONFIG_HOME ~/.config
$XDG_DATA_HOME ~/.local/share
%ProgramData% C:\ProgramData
%LOCALAPPDATA% C:\Users\<user>\AppData\Local
A sample configuration file can be found at
https://github.com/gokcehan/lf/blob/master/etc/lfrc.example.
Commands
This section shows information about builtin commands. Modal commands do not
take any arguments, but instead change the operation mode to read their
input conveniently, and so they are meant to be assigned to keybindings.
quit (default 'q')
Quit lf and return to the shell.
up (default 'k' and '<up>')
half-up (default '<c-u>')
page-up (default '<c-b>' and '<pgup>')
down (default 'j' and '<down>')
half-down (default '<c-d>')
page-down (default '<c-f>' and '<pgdn>')
Move the current file selection upwards/downwards by one/half a page/full
page.
updir (default 'h' and '<left>')
Change the current working directory to the parent directory.
open (default 'l' and '<right>')
If the current file is a directory, then change the current directory to it,
otherwise, execute the 'open' command. A default 'open' command is provided
to call the default system opener asynchronously with the current file as
the argument. A custom 'open' command can be defined to override this
default.
(See also 'OPENER' variable and 'Opening Files' section)
top (default 'gg' and '<home>')
bottom (default 'G' and '<end>')
Move the current file selection to the top/bottom of the directory.
toggle
Toggle the selection of the current file or files given as arguments.
invert (default 'v')
Reverse the selection of all files in the current directory (i.e. 'toggle'
all files). Selections in other directories are not affected by this
command. You can define a new command to select all files in the directory
by combining 'invert' with 'unselect' (i.e. 'cmd select-all :unselect;
invert'), though this will also remove selections in other directories.
unselect (default 'u')
Remove the selection of all files in all directories.
glob-select
Select files that match the given glob.
glob-unselect
Unselect files that match the given glob.
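For example, the following mappings (the key choices are illustrative, not
builtin defaults) select or unselect all shell scripts in the current
directory:
map + :glob-select *.sh
map - :glob-unselect *.sh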
copy (default 'y')
If there are no selections, save the path of the current file to the copy
buffer, otherwise, copy the paths of selected files.
cut (default 'd')
If there are no selections, save the path of the current file to the cut
buffer, otherwise, copy the paths of selected files.
paste (default 'p')
Copy/Move files in copy/cut buffer to the current working directory.
clear (default 'c')
Clear file paths in copy/cut buffer.
sync
Synchronize copied/cut files with server. This command is automatically
called when required.
draw
Draw the screen. This command is automatically called when required.
redraw (default '<c-l>')
Synchronize the terminal and redraw the screen.
load
Load modified files and directories. This command is automatically called
when required.
reload (default '<c-r>')
Flush the cache and reload all files and directories.
echo
Print given arguments to the message line at the bottom.
echomsg
Print given arguments to the message line at the bottom and also to the log
file.
echoerr
Print given arguments to the message line at the bottom in red color and
also to the log file.
cd
Change the working directory to the given argument.
select
Change the current file selection to the given argument.
delete (modal)
Remove the current file or selected file(s).
rename (modal) (default 'r')
Rename the current file using the builtin method. A custom 'rename' command
can be defined to override this default.
source
Read the configuration file given in the argument.
push
Simulate key pushes given in the argument.
read (modal) (default ':')
Read a command to evaluate.
shell (modal) (default '$')
Read a shell command to execute.
(See also 'Prefixes' and 'Shell Commands' sections)
shell-pipe (modal) (default '%')
Read a shell command to execute piping its standard I/O to the bottom
statline.
(See also 'Prefixes' and 'Piping Shell Commands' sections)
shell-wait (modal) (default '!')
Read a shell command to execute and wait for a key press in the end.
(See also 'Prefixes' and 'Waiting Shell Commands' sections)
shell-async (modal) (default '&')
Read a shell command to execute asynchronously without standard I/O.
find (modal) (default 'f')
find-back (modal) (default 'F')
find-next (default ';')
find-prev (default ',')
Read key(s) to find the appropriate file name match in the forward/backward
direction and jump to the next/previous match.
(See also 'anchorfind', 'findlen', 'wrapscan', 'ignorecase', 'smartcase',
'ignoredia', and 'smartdia' options and 'Searching Files' section)
search (default '/')
search-back (default '?')
search-next (default 'n')
search-prev (default 'N')
Read a pattern to search for a file name match in the forward/backward
direction and jump to the next/previous match.
(See also 'globsearch', 'incsearch', 'wrapscan', 'ignorecase', 'smartcase',
'ignoredia', and 'smartdia' options and 'Searching Files' section)
filter (modal)
setfilter
Read a pattern to filter out and only view files matching the pattern.
setfilter does the same but uses an argument to set the filter immediately.
You can supply an argument to 'filter' to use it as the starting prompt.
(See also 'globsearch', 'incfilter', 'ignorecase', 'smartcase', 'ignoredia',
and 'smartdia' options)
mark-save (modal) (default 'm')
Save the current directory as a bookmark assigned to the given key.
mark-load (modal) (default "'")
Change the current directory to the bookmark assigned to the given key. A
special bookmark "'" holds the previous directory after a 'mark-load', 'cd',
or 'select' command.
mark-remove (modal) (default '"')
Remove a bookmark assigned to the given key.
Command Line Commands
This section shows information about command line commands. These should be
mostly compatible with readline keybindings. A character refers to a unicode
code point, a word consists of letters and digits, and a unix word consists
of any non-blank characters.
cmd-escape (default '<esc>')
Quit command line mode and return to normal mode.
cmd-complete (default '<tab>')
Autocomplete the current word.
cmd-menu-complete
Autocomplete the current word, then you can press the bound key(s) again to
cycle through completion options.
cmd-menu-complete-back
Autocomplete the current word, then you can press the bound key(s) again to
cycle through completion options backwards.
cmd-enter (default '<c-j>' and '<enter>')
Execute the current line.
cmd-interrupt (default '<c-c>')
Interrupt the current shell-pipe command and return to the normal mode.
cmd-history-next (default '<c-n>')
cmd-history-prev (default '<c-p>')
Go to next/previous item in the history.
cmd-left (default '<c-b>' and '<left>')
cmd-right (default '<c-f>' and '<right>')
Move the cursor to the left/right.
cmd-home (default '<c-a>' and '<home>')
cmd-end (default '<c-e>' and '<end>')
Move the cursor to the beginning/end of line.
cmd-delete (default '<c-d>' and '<delete>')
cmd-delete-back (default '<backspace>' and '<backspace2>')
Delete the next character in forward/backward direction.
cmd-delete-home (default '<c-u>')
cmd-delete-end (default '<c-k>')
Delete everything up to the beginning/end of line.
cmd-delete-unix-word (default '<c-w>')
Delete the previous unix word.
cmd-yank (default '<c-y>')
Paste the buffer content containing the last deleted item.
cmd-transpose (default '<c-t>')
cmd-transpose-word (default '<a-t>')
Transpose the positions of last two characters/words.
cmd-word (default '<a-f>')
cmd-word-back (default '<a-b>')
Move the cursor by one word in forward/backward direction.
cmd-delete-word (default '<a-d>')
Delete the next word in forward direction.
cmd-capitalize-word (default '<a-c>')
cmd-uppercase-word (default '<a-u>')
cmd-lowercase-word (default '<a-l>')
Capitalize/uppercase/lowercase the current word and jump to the next word.
Options
This section shows information about options to customize the behavior.
Character ':' is used as the separator for list options '[]int' and
'[]string'.
anchorfind bool (default on)
When this option is enabled, find command starts matching patterns from the
beginning of file names, otherwise, it can match at an arbitrary position.
autoquit bool (default off)
Automatically quit server when there are no clients left connected.
dircache bool (default on)
Cache directory contents.
dircounts bool (default off)
When this option is enabled, directory sizes show the number of items inside
instead of the size of directory file. The former needs to be calculated by
reading the directory and counting the items inside. The latter is directly
provided by the operating system and it does not require any calculation,
though it is non-intuitive and it can often be misleading. This option is
disabled by default for performance reasons. This option only has an effect
when 'info' has a 'size' field and the pane is wide enough to show the
information. A thousand items are counted per directory at most, and bigger
directories are shown as '999+'.
dirfirst bool (default on)
Show directories first above regular files.
dironly bool (default off)
Show only directories.
drawbox bool (default off)
Draw boxes around panes with box drawing characters.
errorfmt string (default "\033[7;31;47m%s\033[0m")
Format string of error messages shown in the bottom message line.
filesep string (default "\n")
File separator used in environment variables 'fs' and 'fx'.
findlen int (default 1)
Number of characters prompted for the find command. When this value is set
to 0, find command prompts until there is only a single match left.
globsearch bool (default off)
When this option is enabled, search command patterns are considered as
globs, otherwise they are literals. With globbing, '*' matches any sequence,
'?' matches any character, and '[...]' or '[^...]' matches character sets or
ranges. Otherwise, these characters are interpreted as they are.
hidden bool (default off)
Show hidden files. On unix systems, hidden files are determined by the value
of 'hiddenfiles'. On windows, only files with hidden attributes are
considered hidden files.
hiddenfiles []string (default '.*')
List of hidden file glob patterns. Patterns can be given as relative or
absolute paths. Globbing supports the usual special characters, '*' to match
any sequence, '?' to match any character, and '[...]' or '[^...]' to match
character sets or ranges. In addition, if a pattern starts with '!', then
its matches are excluded from hidden files.
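For example, the following setting (an illustrative value, not the default)
hides all dotfiles except '.config', using ':' as the list separator:
set hiddenfiles ".*:!.config"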
icons bool (default off)
Show icons before each item in the list. By default, only two icons, 🗀
(U+1F5C0) and 🗎 (U+1F5CE), are used for directories and files respectively,
as they are supported in the unicode standard. Icons can be configured with
an environment variable named 'LF_ICONS'. The syntax of this variable is
similar to 'LS_COLORS'. See the wiki page for an example icon configuration.
ifs string (default '')
Sets 'IFS' variable in shell commands. It works by adding the assignment to
the beginning of the command string as 'IFS='...'; ...'. The reason is that
'IFS' variable is not inherited by the shell for security reasons. This
method assumes a POSIX shell syntax and so it can fail for non-POSIX shells.
This option has no effect when the value is left empty. This option does not
have any effect on windows.
ignorecase bool (default on)
Ignore case in sorting and search patterns.
ignoredia bool (default on)
Ignore diacritics in sorting and search patterns.
incsearch bool (default off)
Jump to the first match after each keystroke during searching.
incfilter bool (default off)
Apply filter pattern after each keystroke during filtering.
info []string (default '')
List of information shown for directory items at the right side of pane.
Currently supported information types are 'size', 'time', 'atime', and
'ctime'. Information is only shown when the pane width is more than twice
the width of information.
mouse bool (default off)
Send mouse events as input.
number bool (default off)
Show the position number for directory items at the left side of pane. When
'relativenumber' is enabled, only the current line shows the absolute
position and relative positions are shown for the rest.
period int (default 0)
Set the interval in seconds for periodic checks of directory updates. This
works by periodically calling the 'load' command. Note that directories are
already updated automatically in many cases. This option can be useful when
there is an external process changing the displayed directory and you are
not doing anything in lf. Periodic checks are disabled when the value of
this option is set to zero.
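For example, the following line makes lf call 'load' roughly every second,
which can help when an external process keeps writing to the displayed
directory:
set period 1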
preview bool (default on)
Show previews of files and directories at the right most pane. If the file
has more lines than the preview pane, rest of the lines are not read. Files
containing the null character (U+0000) in the read portion are considered
binary files and displayed as 'binary'.
previewer string (default '') (not filtered if empty)
Set the path of a previewer file to filter the content of regular files for
previewing. The file should be executable. Five arguments are passed to the
file: the first is the current file name; the second, third, fourth, and fifth
are width, height, horizontal position, and vertical position of preview
pane respectively. SIGPIPE signal is sent when enough lines are read. If the
previewer returns a non-zero exit code, then the preview cache for the given
file is disabled. This means that if the file is selected in the future, the
previewer is called once again. Preview filtering is disabled and files are
displayed as they are when the value of this option is left empty.
cleaner string (default '') (not called if empty)
Set the path of a cleaner file. This file will be called if previewing is
enabled, the previewer is set, and the previously selected file had its
preview cache disabled. The file should be executable. One argument is
passed to the file; the path to the file whose preview should be cleaned.
Preview clearing is disabled when the value of this option is left empty.
promptfmt string (default "\033[32;1m%u@%h\033[0m:\033[34;1m%d\033[0m\033[1m%f\033[0m")
Format string of the prompt shown in the top line. Special expansions are
provided, '%u' as the user name, '%h' as the host name, '%w' as the working
directory, '%d' as the working directory with a trailing path separator,
'%f' as the file name, and '%F' as the current filter. Home folder is shown
as '~' in the working directory expansion. Directory names are automatically
shortened to a single character starting from the left most parent when the
prompt does not fit to the screen.
ratios []int (default '1:2:3')
List of ratios of pane widths. Number of items in the list determines the
number of panes in the ui. When 'preview' option is enabled, the right most
number is used for the width of preview pane.
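For example, with 'preview' enabled, the following gives a single file list
and a preview pane of the same width:
set ratios 1:1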
relativenumber bool (default off)
Show the position number relative to the current line. When 'number' is
enabled, current line shows the absolute position, otherwise nothing is
shown.
reverse bool (default off)
Reverse the direction of sort.
scrolloff int (default 0)
Minimum number of offset lines shown at all times in the top and the bottom
of the screen when scrolling. The current line is kept in the middle when
this option is set to a large value that is bigger than half the number of
lines. A smaller offset can be used when the current file is close to the
beginning or end of the list to show the maximum number of items.
shell string (default 'sh' for unix and 'cmd' for windows)
Shell executable to use for shell commands. Shell commands are executed as
'shell shellopts shellflag command -- arguments'.
shellflag string (default '-c' for unix and '/c' for windows)
Command line flag used to pass shell commands.
shellopts []string (default '')
List of shell options to pass to the shell executable.
smartcase bool (default on)
Override 'ignorecase' option when the pattern contains an uppercase
character. This option has no effect when 'ignorecase' is disabled.
smartdia bool (default off)
Override 'ignoredia' option when the pattern contains a character with
diacritic. This option has no effect when 'ignoredia' is disabled.
sortby string (default 'natural')
Sort type for directories. Currently supported sort types are 'natural',
'name', 'size', 'time', 'ctime', 'atime', and 'ext'.
tabstop int (default 8)
Number of space characters to show for horizontal tabulation (U+0009)
character.
timefmt string (default 'Mon Jan _2 15:04:05 2006')
Format string of the file modification time shown in the bottom line.
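For example, an ISO-like timestamp can be set using the Go reference time
(lf is written in Go, so layouts follow its time package):
set timefmt "2006-01-02 15:04:05"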
timefmtthisy string (default 'Jan _2 15:04')
Format string of the file time shown in the info column when it matches this
year.
timefmtothery string (default 'Jan _2 2006')
Format string of the file time shown in the info column when it doesn't
match this year.
truncatechar string (default '~')
Truncate character shown at the end when the file name does not fit to the
pane.
waitmsg string (default 'Press any key to continue')
String shown after commands of shell-wait type.
wrapscan bool (default on)
Searching can wrap around the file list.
wrapscroll bool (default off)
Scrolling can wrap around the file list.
Environment Variables
The following variables are exported for shell commands. These are referred
to with a '$' prefix on POSIX shells (e.g. '$f'), between '%' characters on
Windows cmd (e.g. '%f%'), and with a '$env:' prefix on Windows powershell
(e.g. '$env:f').
f
Current file selection as a full path.
fs
Selected file(s) separated with the value of 'filesep' option as full
path(s).
fx
Selected file(s) (i.e. 'fs') if there are any selected files, otherwise
current file selection (i.e. 'f').
id
Id of the running client.
PWD
Present working directory.
OLDPWD
Initial working directory.
LF_LEVEL
The value of this variable is set to the current nesting level when you run
lf from a shell spawned inside lf. You can add the value of this variable to
your shell prompt to make it clear that your shell runs inside lf. For
example, with POSIX shells, you can use '[ -n "$LF_LEVEL" ] &&
PS1="$PS1""(lf level: $LF_LEVEL) "' in your shell configuration file (e.g.
'~/.bashrc').
OPENER
If this variable is set in the environment, use the same value, otherwise
set the value to 'start' in Windows, 'open' in MacOS, 'xdg-open' in others.
EDITOR
If this variable is set in the environment, use the same value, otherwise
set the value to 'vi' on unix, 'notepad' in Windows.
PAGER
If this variable is set in the environment, use the same value, otherwise
set the value to 'less' on unix, 'more' in Windows.
SHELL
If this variable is set in the environment, use the same value, otherwise
set the value to 'sh' on unix, 'cmd' in Windows.
Prefixes
The following command prefixes are used by lf:
: read (default) builtin/custom command
$ shell shell command
% shell-pipe shell command running with the ui
! shell-wait shell command waiting for key press
& shell-async shell command running asynchronously
The same evaluator is used for the command line and the configuration file
for read and shell commands. The difference is that prefixes are not
necessary in the command line. Instead, different modes are provided to read
corresponding commands. These modes are mapped to the prefix keys above by
default.
Syntax
Characters from '#' to newline are comments and ignored:
# comments start with '#'
There are three special commands ('set', 'map', and 'cmd') and their
variants for configuration.
Command 'set' is used to set an option which can be boolean, integer, or
string:
set hidden # boolean on
set nohidden # boolean off
set hidden! # boolean toggle
set scrolloff 10 # integer value
set sortby time # string value w/o quotes
set sortby 'time' # string value with single quotes (whitespaces)
set sortby "time" # string value with double quotes (backslash escapes)
Command 'map' is used to bind a key to a command which can be builtin
command, custom command, or shell command:
map gh cd ~ # builtin command
map D trash # custom command
map i $less $f # shell command
map U !du -csh * # waiting shell command
Command 'cmap' is used to bind a key to a command line command which can
only be one of the builtin commands:
cmap <c-g> cmd-escape
You can delete an existing binding by leaving the expression empty:
map gh # deletes 'gh' mapping
cmap <c-g> # deletes '<c-g>' mapping
Command 'cmd' is used to define a custom command:
cmd usage $du -h -d1 | less
You can delete an existing command by leaving the expression empty:
cmd trash # deletes 'trash' command
If there is no prefix then ':' is assumed:
map zt set info time
An explicit ':' can be provided to group statements until a newline which is
especially useful for 'map' and 'cmd' commands:
map st :set sortby time; set info time
If you need multiple lines, you can wrap statements in '{{' and '}}' after
the proper prefix.
map st :{{
set sortby time
set info time
}}
Key Mappings
Regular keys are assigned to a command with the usual syntax:
map a down
Keys combined with the shift key simply use the uppercase letter:
map A down
Special keys are written in between '<' and '>' characters and always use
lowercase letters:
map <enter> down
Angle brackets can be assigned with their special names:
map <lt> down
map <gt> down
Function keys are prefixed with 'f' character:
map <f-1> down
Keys combined with the control key are prefixed with 'c' character:
map <c-a> down
Keys combined with the alt key are assigned in two different ways depending
on the behavior of your terminal. Older terminals (e.g. xterm) may set the
8th bit of a character when the alt key is pressed. On these terminals, you
can use the corresponding byte for the mapping:
map á down
Newer terminals (e.g. gnome-terminal) may prefix the key with an escape key
when the alt key is pressed. lf uses the escape delaying mechanism to
recognize alt keys in these terminals (delay is 100ms). On these terminals,
keys combined with the alt key are prefixed with 'a' character:
map <a-a> down
Please note that some key combinations are not possible due to the way
terminals work (e.g. control and h combination sends a backspace key
instead). The easiest way to find the name of a key combination is to press
the key while lf is running and read the name of the key from the unknown
mapping error.
Mouse buttons are prefixed with 'm' character:
map <m-1> down # primary
map <m-2> down # secondary
map <m-3> down # middle
map <m-4> down
map <m-5> down
map <m-6> down
map <m-7> down
map <m-8> down
Mouse wheel events are also prefixed with 'm' character:
map <m-up> down
map <m-down> down
map <m-left> down
map <m-right> down
Push Mappings
The usual way to map a key sequence is to assign it to a named or unnamed
command. While this provides a clean way to remap builtin keys as well as
other commands, it can be limiting at times. For this reason 'push' command
is provided by lf. This command is used to simulate key pushes given as its
arguments. You can 'map' a key to a 'push' command with an argument to
create various keybindings.
This is mainly useful for two purposes. First, it can be used to map a
command with a command count:
map <c-j> push 10j
Second, it can be used to avoid typing the name when a command takes
arguments:
map r push :rename<space>
One thing to be careful is that since 'push' command works with keys instead
of commands it is possible to accidentally create recursive bindings:
map j push 2j
These types of bindings create a deadlock when executed.
Shell Commands
Regular shell commands are the most basic command type that is useful for
many purposes. For example, we can write a shell command to move selected
file(s) to trash. A first attempt to write such a command may look like
this:
cmd trash ${{
mkdir -p ~/.trash
if [ -z "$fs" ]; then
mv "$f" ~/.trash
else
IFS="'printf '\n\t''"; mv $fs ~/.trash
fi
}}
We check '$fs' to see if there are any selected files and move those,
otherwise we just move the current file. Since this is such a common pattern, a separate
'$fx' variable is provided. We can use this variable to get rid of the
conditional:
cmd trash ${{
mkdir -p ~/.trash
IFS="'printf '\n\t''"; mv $fx ~/.trash
}}
The trash directory is checked each time the command is executed. We can
move it outside of the command so it would only run once at startup:
${{ mkdir -p ~/.trash }}
cmd trash ${{ IFS="$(printf '\n\t')"; mv $fx ~/.trash }}
Since these are one liners, we can drop '{{' and '}}':
$mkdir -p ~/.trash
cmd trash $IFS="$(printf '\n\t')"; mv $fx ~/.trash
Finally note that we set 'IFS' variable manually in these commands. Instead
we could use the 'ifs' option to set it for all shell commands (i.e. 'set
ifs "\n"'). This can be especially useful for interactive use (e.g. '$rm $f'
or '$rm $fs' would simply work). This option is not set by default as it can
behave unexpectedly for new users. However, use of this option is highly
recommended and it is assumed in the rest of the documentation.
Piping Shell Commands
Regular shell commands have some limitations in some cases. When an output
or error message is given and the command exits afterwards, the ui is
immediately resumed and there is no way to see the message without dropping
to shell again. Also, even when there is no output or error, the ui still
needs to be paused while the command is running. This can cause flickering
on the screen for short commands and similar distractions for longer
commands.
Instead of pausing the ui, piping shell commands connects stdin, stdout, and
stderr of the command to the statline in the bottom of the ui. This can be
useful for programs following the unix philosophy to give no output in the
success case, and brief error messages or prompts in other cases.
For example, following rename command prompts for overwrite in the statline
if there is an existing file with the given name:
cmd rename %mv -i $f $1
You can also output error messages in the command and it will show up in the
statline. For example, an alternative rename command may look like this:
cmd rename %[ -e $1 ] && printf "file exists" || mv $f $1
Note that input is line buffered and output and error are byte buffered.
Waiting Shell Commands
Waiting shell commands are similar to regular shell commands except that
they wait for a key press when the command is finished. These can be useful
to see the output of a program before the ui is resumed. Waiting shell
commands are more appropriate than piping shell commands when the command is
verbose and the output is best displayed as multiline.
Asynchronous Shell Commands
Asynchronous shell commands are used to start a command in the background
and then resume operation without waiting for the command to finish. Stdin,
stdout, and stderr of the command is neither connected to the terminal nor
to the ui.
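For example, an archive of the current selection can be created in the
background without blocking the ui (the command name here is made up):
cmd archive &tar czf ~/backup.tar.gz $fx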
Remote Commands
One of the more advanced features in lf is remote commands. All clients
connect to a server on startup. It is possible to send commands to all or
any of the connected clients over the common server. This is used internally
to notify file selection changes to other clients.
To use this feature, you need to use a client which supports communicating
with a UNIX-domain socket. OpenBSD implementation of netcat (nc) is one such
example. You can use it to send a command to the socket file:
echo 'send echo hello world' | nc -U /tmp/lf.${USER}.sock
Since such a client may not be available everywhere, lf comes bundled with a
command line flag to be used as such. When using lf, you do not need to
specify the address of the socket file. This is the recommended way of using
remote commands since it is shorter and immune to socket file address
changes:
lf -remote 'send echo hello world'
In this command 'send' is used to send the rest of the string as a command
to all connected clients. You can optionally give it an id number to send a
command to a single client:
lf -remote 'send 1234 echo hello world'
All clients have a unique id number but you may not be aware of the id
number when you are writing a command. For this purpose, an '$id' variable
is exported to the environment for shell commands. The value of this
variable is set to the process id of the client. You can use it to send a
remote command from a client to the server which in return sends a command
back to itself. So now you can display a message in the current client by
calling the following in a shell command:
lf -remote "send $id echo hello world"
Since lf does not have control flow syntax, remote commands are used for
such needs. For example, you can configure the number of columns in the ui
with respect to the terminal width as follows:
cmd recol %{{
w=$(tput cols)
if [ $w -le 80 ]; then
lf -remote "send $id set ratios 1:2"
elif [ $w -le 160 ]; then
lf -remote "send $id set ratios 1:2:3"
else
lf -remote "send $id set ratios 1:2:3:5"
fi
}}
Besides 'send' command, there is a 'quit' command to quit the server when
there are no connected clients left, and a 'quit!' command to force quit the
server by closing client connections first:
lf -remote 'quit'
lf -remote 'quit!'
Lastly, there is a 'conn' command to connect the server as a client. This
should not be needed for users.
File Operations
lf uses its own builtin copy and move operations by default. These are
implemented as asynchronous operations and progress is shown in the bottom
ruler. These commands do not overwrite existing files or directories with
the same name. Instead, a suffix that is compatible with '--backup=numbered'
option in GNU cp is added to the new files or directories. Only file modes
are preserved and all other attributes are ignored including ownership,
timestamps, context, and xattr. Special files such as character and block
devices, named pipes, and sockets are skipped and links are not followed.
Moving is performed using the rename operation of the underlying OS. For
cross-device moving, lf falls back to copying and then deletes the original
files if there are no errors. Operation errors are shown in the message line
as well as the log file and they do not preemptively finish the
corresponding file operation.
File operations can be performed on the current selected file or
alternatively on multiple files by selecting them first. When you 'copy' a
file, lf doesn't actually copy the file on the disk, but only records its
name to a file. The actual file copying takes place when you 'paste'.
Similarly 'paste' after a 'cut' operation moves the file.
You can customize copy and move operations by defining a 'paste' command.
This is a special command that is called when it is defined instead of the
builtin implementation. You can use the following example as a starting
point:
cmd paste %{{
load=$(cat ~/.local/share/lf/files)
mode=$(echo "$load" | sed -n '1p')
list=$(echo "$load" | sed '1d')
if [ $mode = 'copy' ]; then
cp -R $list .
elif [ $mode = 'move' ]; then
mv $list .
fi
rm ~/.local/share/lf/files
lf -remote 'send clear'
}}
Some useful things to consider are using the backup ('--backup') and/or
preserve attributes ('-a') options with 'cp' and 'mv' commands if they
support them (i.e. GNU implementation), changing the command type to
asynchronous, or using the 'rsync' command with its progress bar option for
copying and feeding the progress to the client periodically with remote
'echo' calls.
By default, lf does not assign 'delete' command to a key to protect new
users. You can customize file deletion by defining a 'delete' command. You
can also assign a key to this command if you like. Example commands to move
selected files to a trash folder and to remove files completely after a
prompt are provided in the example configuration file, as sketched below.
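A sketch along those lines (adapted here rather than copied verbatim from
the example file) could be:
cmd delete ${{
printf "delete?[y/n]"
read ans
[ $ans = "y" ] && rm -rf $fx
}}
map D delete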
Searching Files
There are two mechanisms implemented in lf to search a file in the current
directory. Searching is the traditional method to move the selection to a
file matching a given pattern. Finding is an alternative way to search for a
pattern possibly using fewer keystrokes.
Searching mechanism is implemented with commands 'search' (default '/'),
'search-back' (default '?'), 'search-next' (default 'n'), and 'search-prev'
(default 'N'). You can enable 'globsearch' option to match with a glob
pattern. Globbing supports '*' to match any sequence, '?' to match any
character, and '[...]' or '[^...]' to match character sets or ranges. You can
enable 'incsearch' option to jump to the current match at each keystroke
while typing. In this mode, you can either use 'cmd-enter' to accept the
search or use 'cmd-escape' to cancel the search. Alternatively, you can also
map some other commands with 'cmap' to accept the search and execute the
command immediately afterwards. Possible candidates are 'up', 'down' and
their variants, 'top', 'bottom', 'updir', and 'open' commands. For example,
you can use arrow keys to finish the search with the following mappings:
cmap <up> up
cmap <down> down
cmap <left> updir
cmap <right> open
Finding mechanism is implemented with commands 'find' (default 'f'),
'find-back' (default 'F'), 'find-next' (default ';'), 'find-prev' (default
','). You can disable 'anchorfind' option to match a pattern at an arbitrary
position in the filename instead of the beginning. You can set the number of
keys to match using 'findlen' option. If you set this value to zero, then
the keys are read until there is only a single match. Default values of
these two options are set to jump to the first file with the given initial.
Some options affect both searching and finding. You can disable 'wrapscan'
option to prevent searches from wrapping around at the end of the file list.
can disable 'ignorecase' option to match cases in the pattern and the
filename. This option is already automatically overridden if the pattern
contains upper case characters. You can disable 'smartcase' option to
disable this behavior. Two similar options 'ignoredia' and 'smartdia' are
provided to control matching diacritics in latin letters.
Opening Files
You can define an 'open' command (default 'l' and '<right>') to configure
file opening. This command is only called when the current file is not a
directory, otherwise the directory is entered instead. You can define it
just as you would define any other command:
cmd open $vi $fx
It is possible to use different command types:
cmd open &xdg-open $f
You may want to use either file extensions or mime types from 'file'
command:
cmd open ${{
test -L $f && f=$(readlink -f $f)
case $(file --mime-type $f -b) in
text/*) vi $fx;;
*) for f in $fx; do xdg-open $f > /dev/null 2> /dev/null & done;;
esac
}}
You may want to use 'setsid' before your opener command to have persistent
processes that continue to run after lf quits.
Following command is provided by default:
cmd open &$OPENER $f
You may also use any other existing file openers as you like. Possible
options are 'libfile-mimeinfo-perl' (executable name is 'mimeopen'), 'rifle'
(ranger's default file opener), or 'mimeo' to name a few.
Previewing Files
lf previews files on the preview pane by printing the file until the end or
the preview pane is filled. This output can be enhanced by providing a
custom preview script for filtering. This can be used to highlight source
codes, list contents of archive files or view pdf or image files as text to
name a few. For coloring, lf recognizes ansi escape codes.
In order to use this feature you need to set the value of 'previewer' option
to the path of an executable file. lf passes the current file name as the
first argument and the height of the preview pane as the second argument
when running this file. Output of the execution is printed in the preview
pane. You may want to use the same script in your pager mapping as well if
any:
set previewer ~/.config/lf/pv.sh
map i $~/.config/lf/pv.sh $f | less -R
For 'less' pager, you may instead utilize 'LESSOPEN' mechanism so that
useful information about the file such as the full path of the file can be
displayed in the statusline below:
set previewer ~/.config/lf/pv.sh
map i $LESSOPEN='| ~/.config/lf/pv.sh %s' less -R $f
Since this script is called for each file selection change it needs to be as
efficient as possible and this responsibility is left to the user. You may
use file extensions to determine the type of file more efficiently compared
to obtaining mime types from 'file' command. Extensions can then be used to
match cleanly within a conditional:
#!/bin/sh
case "$1" in
*.tar*) tar tf "$1";;
*.zip) unzip -l "$1";;
*.rar) unrar l "$1";;
*.7z) 7z l "$1";;
*.pdf) pdftotext "$1" -;;
*) highlight -O ansi "$1";;
esac
Another important consideration for efficiency is the use of programs with
short startup times for preview. For this reason, 'highlight' is recommended
over 'pygmentize' for syntax highlighting. Besides, it is also important
that the application processes the file on the fly rather than first
reading it into memory and then doing the processing afterwards. This is
especially relevant for big files. lf automatically closes the previewer
script output pipe with a SIGPIPE when enough lines are read. When
everything else fails, you can make use of the height argument to only feed
the first portion of the file to a program for preview. Note that some
programs may not respond well to SIGPIPE and may exit with a non-zero return
code, which disables caching for the file. You may add a trailing '|| true'
command to avoid such errors:
highlight -O ansi "$1" || true
You may also use an existing preview filter as you like. Your system may
already come with a preview filter named 'lesspipe'. These filters may have
a mechanism to add user customizations as well. See the related
documentations for more information.
Changing Directory
lf changes the working directory of the process to the current directory so
that shell commands always work in the displayed directory. After quitting,
it returns to the original directory where it is first launched like all
shell programs. If you want to stay in the current directory after quitting,
you can use one of the example wrapper shell scripts provided in the
repository.
There is a special command 'on-cd' that runs a shell command when it is
defined and the directory is changed. You can define it just as you would
define any other command:
cmd on-cd &{{
# display git repository status in your prompt
source /usr/share/git/completion/git-prompt.sh
GIT_PS1_SHOWDIRTYSTATE=auto
GIT_PS1_SHOWSTASHSTATE=auto
GIT_PS1_SHOWUNTRACKEDFILES=auto
GIT_PS1_SHOWUPSTREAM=auto
git=$(__git_ps1 " (%s)") || true
fmt="\033[32;1m%u@%h\033[0m:\033[34;1m%d\033[0m\033[1m%f$git\033[0m"
lf -remote "send $id set promptfmt \"$fmt\""
}}
If you want to print escape sequences, you may redirect 'printf' output to
'/dev/tty'. The following xterm specific escape sequence sets the terminal
title to the working directory:
cmd on-cd &{{
printf "\033]0; $PWD\007" > /dev/tty
}}
This command runs whenever you change directory but not on startup. You can
add an extra call to make it run on startup as well:
cmd on-cd &{{ # ... }}
on-cd
Note that all shell commands are possible but '%' and '&' are usually more
appropriate as '$' and '!' causes flickers and pauses respectively.
There is also a 'pre-cd' command that works like 'on-cd' but runs before
the directory is actually changed. It is generally a bad idea to put
movement commands (like 'up' or 'top') here.
Colors
lf tries to automatically adapt its colors to the environment. It starts
with a default colorscheme and updates colors using values of existing
environment variables possibly by overwriting its previous values. Colors
are set in the following order:
1. default
2. LSCOLORS (Mac/BSD ls)
3. LS_COLORS (GNU ls)
4. LF_COLORS (lf specific)
Please refer to the corresponding man pages for more information about
'LSCOLORS' and 'LS_COLORS'. 'LF_COLORS' is provided with the same syntax as
'LS_COLORS' in case you want to configure colors only for lf but not ls.
This can be useful since there are some differences between ls and lf,
though one should expect the same behavior for common cases.
You can configure lf colors in two different ways. First, you can only
configure 8 basic colors used by your terminal and lf should pick up those
colors automatically. Depending on your terminal, you should be able to
select your colors from a 24-bit palette. This is the recommended approach
as colors used by other programs will also match each other.
Second, you can set the values of environmental variables mentioned above
for fine grained customization. Note that 'LS_COLORS/LF_COLORS' are more
powerful than 'LSCOLORS' and they can be used even when GNU programs are not
installed on the system. You can combine this second method with the first
method for best results.
Lastly, you may also want to configure the colors of the prompt line to
match the rest of the colors. Colors of the prompt line can be configured
using the 'promptfmt' option which can include hardcoded colors as ansi
escapes. See the default value of this option to have an idea about how to
color this line.
It is worth noting that lf uses as many colors as are advertised by your
terminal's entry in your system's terminfo or infocmp database; if this is
not present, lf will default to an internal database. For terminals
supporting 24-bit (or "true") color that do not have a database entry (or
one that does not advertise all capabilities), support can be enabled by
either setting the '$COLORTERM' variable to "truecolor" or ensuring '$TERM'
is set to a value that ends with "-truecolor".
Default lf colors are mostly taken from GNU dircolors defaults. These
defaults use 8 basic colors and bold attribute. Default dircolors entries
with background colors are simplified to avoid confusion with current file
selection in lf. Similarly, there are only file type matchings and extension
matchings are left out for simplicity. Default values are as follows given
with their matching order in lf:
ln 01;36
or 31;01
tw 01;34
ow 01;34
st 01;34
di 01;34
pi 33
so 01;35
bd 33;01
cd 33;01
su 01;32
sg 01;32
ex 01;32
fi 00
Note that, lf first tries matching file names and then falls back to file
types. The full order of matchings from most specific to least are as
follows:
1. Full Path (e.g. '~/.config/lf/lfrc')
2. Dir Name (e.g. '.git/') (only matches dirs with a trailing slash at the end)
3. File Type (e.g. 'ln') (except 'fi')
4. File Name (e.g. '.git*') (only matches files with a trailing star at the end)
5. Base Name (e.g. 'README.*')
6. Extension (e.g. '*.txt')
7. Default (i.e. 'fi')
For example, given a regular text file '/path/to/README.txt', the following
entries are checked in the configuration and the first one to match is used:
1. '/path/to/README.txt'
2. (skipped since the file is not a directory)
3. (skipped since the file is of type 'fi')
4. 'README.txt*'
5. 'README.*'
6. '*.txt'
7. 'fi'
Given a regular directory '/path/to/example.d', the following entries are
checked in the configuration and the first one to match is used:
1. '/path/to/example.d'
2. 'example.d/'
3. 'di'
4. 'example.d*'
5. 'example.*'
6. '*.d'
7. 'fi'
Note that glob-like patterns do not actually perform glob matching due to
performance reasons.
For example, you can set a variable as follows:
export LF_COLORS="~/Documents=01;31:~/Downloads=01;31:~/.local/share=01;31:~/.config/lf/lfrc=31:.git/=01;32:.git=32:.gitignore=32:Makefile=32:README.*=33:*.txt=34:*.md=34:ln=01;36:di=01;34:ex=01;32:"
Having all entries on a single line can make it hard to read. You may
instead divide it to multiple lines in between double quotes by escaping
newlines with backslashes as follows:
export LF_COLORS="\
~/Documents=01;31:\
~/Downloads=01;31:\
~/.local/share=01;31:\
~/.config/lf/lfrc=31:\
.git/=01;32:\
.git=32:\
.gitignore=32:\
Makefile=32:\
README.*=33:\
*.txt=34:\
*.md=34:\
ln=01;36:\
di=01;34:\
ex=01;32:\
"
Having such a long variable definition in a shell configuration file might
be undesirable. You may instead put this definition in a separate file and
source it in your shell configuration file as follows:
[ -f "/path/to/colors" ] && source "/path/to/colors"
See the wiki page for ansi escape codes
https://en.wikipedia.org/wiki/ANSI_escape_code.
Icons
Icons are configured using 'LF_ICONS' environment variable. This variable
uses the same syntax as 'LS_COLORS/LF_COLORS'. Instead of colors, you should
put a single character as the value of each entry. Do not forget to enable
'icons' option to see the icons. Default values are as follows given with
their matching order in lf:
ln 🗎
or 🗎
tw 🗀
ow 🗀
st 🗀
di 🗀
pi 🗎
so 🗎
bd 🗎
cd 🗎
su 🗎
sg 🗎
ex 🗎
fi 🗎
See the wiki page for an example icons configuration
https://github.com/gokcehan/lf/wiki/Icons.
`
runTest.ts
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as path from 'path';
import { runTests } from 'vscode-test';
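// Downloads a VS Code build and runs the extension's integration tests in
// it; the launchArgs below enable this extension's proposed API and disable
// all other extensions for isolation.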
async function main() {
try {
// The folder containing the Extension Manifest package.json
// Passed to `--extensionDevelopmentPath`
const extensionDevelopmentPath = path.resolve(__dirname, '../../');
// The path to test runner
// Passed to --extensionTestsPath
const extensionTestsPath = path.resolve(__dirname, './index');
// Download VS Code, unzip it and run the integration test
await runTests({
extensionDevelopmentPath,
extensionTestsPath,
launchArgs: [
'--enable-proposed-api=ms-vscode.vscode-github-issue-notebooks',
'--disable-extensions'
],
// version: 'insiders'
});
} catch (err) {
console.error('Failed to run tests');
console.error(err);
process.exit(1);
}
}
main();
counter_20211124214951.js
function getDayOfYear() {
// Date.parse takes a string; build a Date for Dec 31 of the previous year
// and subtract its epoch time instead.
let timeSinceEpoch = Date.now() - new Date(new Date().getFullYear(), 0, 0).getTime();
return Math.floor(timeSinceEpoch / 86400000);
}
module.exports = class Counter {
constructor(state, env) {
this.state = state;
this.env = env;
this.state.blockConcurrencyWhile(async () => {
let storedViews = await this.state.storage.get("views");
let storedLogs = await this.state.storage.get("logs");
this.views = storedViews || 0;
this.logs = storedLogs || [];
});
}
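// Routes handled below: "/increment" bumps the view count and records
// request metadata in the logs, "/decrement" lowers the count, "/get" and
// "/" simply report the current state. Anything else returns a 404.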
async fetch(request) {
let url = new URL(request.url);
let currentViews = this.views;
switch (url.pathname) {
case "/increment": | timestamp: Date.now(),
headers: Object.fromEntries(request.headers),
};
Object.entries(request.cf).forEach((entry) => {
if(entry && entry[1]?.length) {
res[entry[0]] = entry[1]
};
});
this.logs.push(res);
// Persist the logs as well; the constructor reads them back from storage.
await this.state.storage.put("logs", this.logs);
break;
case "/decrement":
currentViews = --this.views;
await this.state.storage.put("views", this.views);
break;
case "/get":
// nothing to mutate; the current views and logs are returned below
break;
case "/":
break;
default:
return new Response("Not found", {
status: 404
});
}
const respData = {
dayOfYear: getDayOfYear(),
views: currentViews,
currentLogs: this.logs
};
const json = JSON.stringify(respData, null, 2)
console.log(json);
// Response takes (body, init); the status belongs in the same init
// object as the headers rather than in a third argument.
return new Response(json, {
headers: {
"content-type": "application/json;charset=UTF-8"
},
status: 201
});
}
}
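// A minimal sketch of a Worker that routes requests to this Durable Object.
// The binding name COUNTER is an assumption; it would be configured in
// wrangler.toml, not in this file.
//
// export default {
//   async fetch(request, env) {
//     const id = env.COUNTER.idFromName("global");
//     return env.COUNTER.get(id).fetch(request);
//   }
// };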
getNetworkWatcher.go
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20170601
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Network watcher in a resource group.
func LookupNetworkWatcher(ctx *pulumi.Context, args *LookupNetworkWatcherArgs, opts ...pulumi.InvokeOption) (*LookupNetworkWatcherResult, error) {
var rv LookupNetworkWatcherResult
err := ctx.Invoke("azure-native:network/v20170601:getNetworkWatcher", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
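// Example usage (a sketch only; the watcher and resource group names are
// placeholders, and the import alias for this package is assumed):
//
//	watcher, err := v20170601.LookupNetworkWatcher(ctx, &v20170601.LookupNetworkWatcherArgs{
//		NetworkWatcherName: "example-watcher",
//		ResourceGroupName:  "example-resources",
//	})
//	if err != nil {
//		return err
//	}
//	ctx.Export("watcherProvisioningState", pulumi.String(watcher.ProvisioningState))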
type LookupNetworkWatcherArgs struct {
// The name of the network watcher.
NetworkWatcherName string `pulumi:"networkWatcherName"`
// The name of the resource group.
ResourceGroupName string `pulumi:"resourceGroupName"`
}
// Network watcher in a resource group.
type LookupNetworkWatcherResult struct {
// A unique read-only string that changes whenever the resource is updated.
Etag *string `pulumi:"etag"`
// Resource ID.
Id *string `pulumi:"id"`
// Resource location.
Location *string `pulumi:"location"`
// Resource name.
Name string `pulumi:"name"`
// The provisioning state of the resource.
ProvisioningState string `pulumi:"provisioningState"`
// Resource tags.
Tags map[string]string `pulumi:"tags"`
// Resource type.
Type string `pulumi:"type"`
}
s3.rs
// Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::cmp;
use std::collections::HashMap;
use std::default::Default;
use std::io::Write;
use std::time::{Duration, Instant};
use async_trait::async_trait;
use flate2::write::GzEncoder;
use flate2::Compression as Flate2Compression;
use rusoto_core::{ByteStream, RusotoError};
use rusoto_s3::{
CreateBucketConfiguration, CreateBucketError, CreateBucketRequest,
GetBucketNotificationConfigurationRequest, PutBucketNotificationConfigurationRequest,
PutObjectRequest, QueueConfiguration, S3,
};
use rusoto_sqs::{
CreateQueueError, CreateQueueRequest, DeleteMessageBatchRequest,
DeleteMessageBatchRequestEntry, GetQueueAttributesRequest, GetQueueUrlRequest,
ReceiveMessageRequest, SetQueueAttributesRequest, Sqs,
};
use crate::action::file::{build_compression, Compression};
use crate::action::{Action, State};
use crate::parser::BuiltinCommand;
pub struct CreateBucketAction {
bucket: String,
}
pub fn build_create_bucket(mut cmd: BuiltinCommand) -> Result<CreateBucketAction, String> {
let bucket = cmd.args.string("bucket")?;
cmd.args.done()?;
Ok(CreateBucketAction { bucket })
}
#[async_trait]
impl Action for CreateBucketAction {
async fn undo(&self, _state: &mut State) -> Result<(), String> {
Ok(())
}
async fn redo(&self, state: &mut State) -> Result<(), String> {
println!("Creating S3 Bucket {}", self.bucket);
match state
.s3_client
.create_bucket(CreateBucketRequest {
bucket: self.bucket.clone(),
create_bucket_configuration: Some(CreateBucketConfiguration {
location_constraint: Some(state.aws_region.name().to_string()),
}),
..Default::default()
})
.await
{
Ok(_) | Err(RusotoError::Service(CreateBucketError::BucketAlreadyOwnedByYou(_))) => {
state.s3_buckets_created.insert(self.bucket.clone());
Ok(())
}
Err(e) => Err(format!("creating bucket: {}", e)),
}
}
}
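// Sketch of the corresponding testdrive directive (the exact command name is
// defined by the dispatcher mapping BuiltinCommand names to these builders and
// is not visible in this file; the bucket value is a placeholder):
//
//     $ s3-create-bucket bucket=materialize-ci-example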
pub struct PutObjectAction {
    bucket: String,
key: String,
compression: Compression,
contents: String,
}
pub fn build_put_object(mut cmd: BuiltinCommand) -> Result<PutObjectAction, String> {
let bucket = cmd.args.string("bucket")?;
let key = cmd.args.string("key")?;
let compression = build_compression(&mut cmd)?;
let contents = cmd.input.join("\n");
cmd.args.done()?;
Ok(PutObjectAction {
bucket,
key,
compression,
contents,
})
}
#[async_trait]
impl Action for PutObjectAction {
async fn undo(&self, _state: &mut State) -> Result<(), String> {
Ok(())
}
async fn redo(&self, state: &mut State) -> Result<(), String> {
println!("Creating S3 Bucket {}", self.bucket);
let buffer = self.contents.clone().into_bytes();
let contents = match self.compression {
Compression::None => Ok(buffer),
Compression::Gzip => {
let mut encoder = GzEncoder::new(Vec::new(), Flate2Compression::default());
encoder
.write_all(buffer.as_ref())
.map_err(|e| format!("error writing bytes to encoder: {}", e))?;
encoder
.finish()
.map_err(|e| format!("error compressing contents: {}", e))
}
}?;
state
.s3_client
.put_object(PutObjectRequest {
bucket: self.bucket.clone(),
body: Some(ByteStream::from(contents)),
content_type: Some("application/octet-stream".to_string()),
content_encoding: match self.compression {
Compression::None => None,
Compression::Gzip => Some("gzip".to_string()),
},
key: self.key.clone(),
..Default::default()
})
.await
.map(|_| ())
.map_err(|e| format!("putting s3 object: {}", e))
}
}
pub struct AddBucketNotifications {
bucket: String,
events: Vec<String>,
queue: String,
bucket_prefix: String,
sqs_test_prefix: String,
sqs_validation_timeout: Option<Duration>,
}
pub fn build_add_notifications(mut cmd: BuiltinCommand) -> Result<AddBucketNotifications, String> {
let events = cmd
.args
.opt_string("events")
.map(|a| a.split(',').map(|s| s.to_string()).collect())
.unwrap_or_else(|| vec!["s3:ObjectCreated:*".to_string()]);
let bucket_prefix = cmd
.args
.opt_string("bucket_prefix")
.unwrap_or_else(|| "materialize-ci-*".into());
let bucket = cmd.args.string("bucket")?;
let queue = cmd.args.string("queue")?;
let sqs_test_prefix = cmd
.args
.opt_string("sqs-test-prefix")
.unwrap_or_else(|| "sqs-test".into());
let sqs_validation_timeout = cmd
.args
.opt_string("sqs-validation-timeout")
.map(|t| parse_duration::parse(&t).map_err(|e| e.to_string()))
.transpose()?;
cmd.args.done()?;
Ok(AddBucketNotifications {
bucket,
events,
queue,
bucket_prefix,
sqs_test_prefix,
sqs_validation_timeout,
})
}
#[async_trait]
impl Action for AddBucketNotifications {
async fn undo(&self, _state: &mut State) -> Result<(), String> {
Ok(())
}
async fn redo(&self, state: &mut State) -> Result<(), String> {
let result = state
.sqs_client
.create_queue(CreateQueueRequest {
queue_name: self.queue.clone(),
..Default::default()
})
.await;
// get queue properties used for the rest of the mutations
let queue_url = match result {
Ok(r) => r
.queue_url
.expect("queue creation should always return the url"),
Err(RusotoError::Service(CreateQueueError::QueueNameExists(q))) => {
let resp = state
.sqs_client
.get_queue_url(GetQueueUrlRequest {
queue_name: q,
queue_owner_aws_account_id: None,
})
.await
.map_err(|e| {
format!(
"when trying to get sqs queue url for already-existing queue: {}",
e
)
})?;
resp.queue_url
.expect("successfully getting the url gets the url")
}
Err(e) => return Err(e.to_string()),
};
let queue_arn = state
.sqs_client
.get_queue_attributes(GetQueueAttributesRequest {
attribute_names: Some(vec!["QueueArn".to_string()]),
queue_url: queue_url.clone(),
})
.await
.map_err(|e| format!("getting queue {} attributes: {}", self.queue, e))?
.attributes
.ok_or_else(|| "the result should not be empty".to_string())?
.remove("QueueArn")
.ok_or_else(|| "QueueArn should be present in arn request".to_string())?;
// Configure the queue to allow the S3 bucket to write to this queue
let mut attributes = HashMap::new();
attributes.insert(
"Policy".to_string(),
allow_s3_policy(&queue_arn, &self.bucket_prefix, &state.aws_account),
);
state
.sqs_client
.set_queue_attributes(SetQueueAttributesRequest {
queue_url: queue_url.clone(),
attributes,
})
.await
.map_err(|e| format!("setting aws queue attributes: {}", e))?;
state.sqs_queues_created.insert(queue_url.clone());
// Configure the s3 bucket to write to the queue, without overwriting any existing configs
let mut config = state
.s3_client
.get_bucket_notification_configuration(GetBucketNotificationConfigurationRequest {
bucket: self.bucket.clone(),
..Default::default()
})
.await
.map_err(|e| format!("getting bucket notification_configuration: {}", e))?;
{
let queue_configs = config.queue_configurations.get_or_insert_with(Vec::new);
queue_configs.push(QueueConfiguration {
events: self.events.clone(),
queue_arn,
..Default::default()
});
}
state
.s3_client
.put_bucket_notification_configuration(PutBucketNotificationConfigurationRequest {
bucket: self.bucket.clone(),
notification_configuration: config,
..Default::default()
})
.await
.map_err(|e| {
format!(
"Putting s3 bucket configuration notification {} \n{:?}",
e, e
)
})?;
let sqs_validation_timeout = self
.sqs_validation_timeout
.unwrap_or_else(|| cmp::max(state.default_timeout, Duration::from_secs(120)));
// Wait until we are sure that the configuration has taken effect
//
// AWS doesn't specify anywhere how long it should take for
// newly-configured buckets to start generating sqs notifications, so
// we continuously put new objects into the bucket and wait for any
// message to show up.
let mut attempts = 0;
let mut success = false;
print!(
"Verifying SQS notification configuration for up to {:?} ",
sqs_validation_timeout
);
let start = Instant::now();
while start.elapsed() < sqs_validation_timeout {
state
.s3_client
.put_object(PutObjectRequest {
bucket: self.bucket.clone(),
body: Some(Vec::new().into()),
key: format!("{}/{}", self.sqs_test_prefix, attempts),
..Default::default()
})
.await
.map_err(|e| format!("creating object to verify sqs: {}", e))?;
attempts += 1;
let resp = state
.sqs_client
.receive_message(ReceiveMessageRequest {
queue_url: queue_url.clone(),
wait_time_seconds: Some(1),
..Default::default()
})
.await
.map_err(|e| format!("reading from sqs for verification: {}", e))?;
if let Some(ms) = resp.messages {
if !ms.is_empty() {
let found_real_message = ms
.iter()
.any(|m| m.body.as_ref().unwrap().contains("ObjectCreated:Put"));
if found_real_message {
success = true;
}
state
.sqs_client
.delete_message_batch(DeleteMessageBatchRequest {
queue_url: queue_url.to_string(),
entries: ms
.into_iter()
.enumerate()
.map(|(i, m)| DeleteMessageBatchRequestEntry {
id: i.to_string(),
receipt_handle: m.receipt_handle.unwrap(),
})
.collect(),
})
.await
.map_err(|e| format!("Deleting validation messages from sqs: {}", e))?;
}
}
if success {
break;
}
print!(".");
}
if success {
println!(
" Success! (in {} attempts and {:?})",
attempts + 1,
start.elapsed()
);
Ok(())
} else {
println!(
" Error, never got messages (after {} attempts and {:?})",
attempts + 1,
start.elapsed()
);
Err("Never got messages on S3 bucket notification queue".to_string())
}
}
}
fn allow_s3_policy(queue_arn: &str, bucket_prefix: &str, self_account: &str) -> String {
format!(
r#"{{
"Version": "2012-10-17",
"Id": "AllowS3Pushing",
"Statement": [
{{
"Sid": "AllowS3Pushing",
"Effect": "Allow",
"Principal": {{
"AWS":"*"
}},
"Action": [
"SQS:SendMessage"
],
"Resource": "{queue_arn}",
"Condition": {{
"ArnLike": {{ "aws:SourceArn": "arn:aws:s3:*:*:{bucket_prefix}" }},
"StringEquals": {{ "aws:SourceAccount": "{self_account}" }}
}}
}}
]
}}"#,
queue_arn = queue_arn,
bucket_prefix = bucket_prefix,
self_account = self_account
)
} | |
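// For reference, with queue_arn = "arn:aws:sqs:us-east-1:123456789012:notifications",
// bucket_prefix = "materialize-ci-*" and self_account = "123456789012" (all
// placeholders), the rendered policy grants SQS:SendMessage on that queue only
// to S3 buckets matching arn:aws:s3:*:*:materialize-ci-* owned by that account.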
local.go | /*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package exec
import (
"bytes"
"io"
osexec "os/exec"
"sync"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/globals"
)
// LocalCmd wraps os/exec.Cmd, implementing the kind/pkg/exec.Cmd interface
type LocalCmd struct {
*osexec.Cmd
}
var _ Cmd = &LocalCmd{}
// LocalCmder is a factory for LocalCmd, implementing Cmder
type LocalCmder struct{}
var _ Cmder = &LocalCmder{}
// Command returns a new exec.Cmd backed by Cmd
func (c *LocalCmder) Command(name string, arg ...string) Cmd {
return &LocalCmd{
Cmd: osexec.Command(name, arg...),
}
}
// SetEnv sets env
func (cmd *LocalCmd) SetEnv(env ...string) Cmd {
cmd.Env = env
return cmd
}
// SetStdin sets stdin
func (cmd *LocalCmd) SetStdin(r io.Reader) Cmd {
cmd.Stdin = r
return cmd
}
// SetStdout set stdout
func (cmd *LocalCmd) SetStdout(w io.Writer) Cmd {
cmd.Stdout = w
return cmd
}
// SetStderr sets stderr
func (cmd *LocalCmd) SetStderr(w io.Writer) Cmd {
cmd.Stderr = w
return cmd
}
// Run runs the command
// If the returned error is non-nil, it should be of type *RunError
func (cmd *LocalCmd) Run() error {
// Background:
// Go's stdlib will setup and use a shared fd when cmd.Stderr == cmd.Stdout
// In any other case, it will use different fds, which will involve
// two different io.Copy goroutines writing to cmd.Stderr and cmd.Stdout
//
// Given this, we must synchronize capturing the output to a buffer
// IFF ! interfaceEqual(cmd.Stderr, cmd.Stdout)
var combinedOutput bytes.Buffer
var combinedOutputWriter io.Writer = &combinedOutput
if cmd.Stdout == nil && cmd.Stderr == nil {
// Case 1: If stdout and stderr are nil, we can just use the buffer
// The buffer will be == and Go will use one fd / goroutine
cmd.Stdout = combinedOutputWriter
cmd.Stderr = combinedOutputWriter
} else if interfaceEqual(cmd.Stdout, cmd.Stderr) {
// Case 2: If cmd.Stdout == cmd.Stderr go will still share the fd,
// but we need to wrap with a MultiWriter to respect the other writer
// and our buffer.
// The MultiWriter will be == and Go will use one fd / goroutine
cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter)
cmd.Stderr = cmd.Stdout
} else {
// Case 3: If cmd.Stdout != cmd.Stderr, we need to synchronize the
// combined output writer.
// Go will use different fds / write routines for stdout and stderr
combinedOutputWriter = &mutexWriter{
writer: &combinedOutput,
}
// wrap writers if non-nil
if cmd.Stdout != nil {
cmd.Stdout = io.MultiWriter(cmd.Stdout, combinedOutputWriter)
} else {
cmd.Stdout = combinedOutputWriter
}
if cmd.Stderr != nil {
cmd.Stderr = io.MultiWriter(cmd.Stderr, combinedOutputWriter)
} else {
cmd.Stderr = combinedOutputWriter
}
}
// TODO: should be in the caller or logger should be injected somehow ...
globals.GetLogger().V(3).Infof("Running: \"%s\"", PrettyCommand(cmd.Args[0], cmd.Args[1:]...))
if err := cmd.Cmd.Run(); err != nil {
return errors.WithStack(&RunError{
Command: cmd.Args,
Output: combinedOutput.Bytes(),
Inner: err,
})
}
return nil
}
// interfaceEqual protects against panics from doing equality tests on
// two interfaces with non-comparable underlying types.
// This helper is borrowed from the Go stdlib in os/exec.
// Note that the recover will only happen if a is not comparable to b,
// in which case we'll return false
// We've lightly modified this to pass errcheck (explicitly ignoring recover)
func interfaceEqual(a, b interface{}) bool {
defer func() {
_ = recover()
}()
return a == b
}
// mutexWriter is a simple synchronized wrapper around an io.Writer
type mutexWriter struct {
writer io.Writer
mu sync.Mutex
}
func (m *mutexWriter) Write(b []byte) (int, error) {
m.mu.Lock()
defer m.mu.Unlock()
n, err := m.writer.Write(b)
return n, err
} | |
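// Usage sketch (the command and arguments are illustrative only):
//
//	var out bytes.Buffer
//	cmd := (&LocalCmder{}).Command("echo", "hello").SetStdout(&out)
//	if err := cmd.Run(); err != nil {
//		// err wraps a *RunError carrying the combined output
//	}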
stat_object.rs | // THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY!
//
#[derive(Debug, Clone, Default)]
#[doc = "调用 API 所用的路径参数"]
pub struct PathParams {
r#entry: Option<std::borrow::Cow<'static, str>>,
extended_segments: Vec<std::borrow::Cow<'static, str>>,
}
impl PathParams {
#[inline]
#[must_use]
#[doc = "追加新的路径段"]
pub fn push_segment(mut self, segment: impl Into<std::borrow::Cow<'static, str>>) -> Self {
self.extended_segments.push(segment.into());
self
}
fn build(self) -> Vec<std::borrow::Cow<'static, str>> {
let mut all_segments: Vec<_> = Default::default();
if let Some(segment) = self.r#entry {
all_segments.push(segment);
}
all_segments.extend(self.extended_segments);
all_segments
}
}
impl PathParams {
#[inline]
#[must_use]
#[doc = "指定目标对象空间与目标对象名称"]
pub fn set_entry_as_str(mut self, value: impl Into<std::borrow::Cow<'static, str>>) -> Self {
self.r#entry = Some(qiniu_utils::base64::urlsafe(value.into().as_bytes()).into());
self
}
}
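// Usage sketch ("bucket:key" is a placeholder entry): the value is url-safe
// base64 encoded by set_entry_as_str, so the final request path becomes
// /stat/<urlsafe_base64("bucket:key")>.
//
//     let params = PathParams::default().set_entry_as_str("bucket:key");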
#[derive(Debug, Clone, Default)]
#[doc = "调用 API 所用的 URL 查询参数"]
pub struct QueryParams<'a> {
map: indexmap::IndexMap<qiniu_http_client::QueryPairKey<'a>, qiniu_http_client::QueryPairValue<'a>>,
}
impl<'a> QueryParams<'a> {
#[inline]
#[must_use]
#[doc = "插入一个新的查询参数对"]
pub fn insert(
mut self,
query_pair_key: qiniu_http_client::QueryPairKey<'a>,
query_pair_value: qiniu_http_client::QueryPairValue<'a>,
) -> Self {
self.map.insert(query_pair_key, query_pair_value);
self
}
fn build(self) -> Vec<qiniu_http_client::QueryPair<'a>> {
Vec::from_iter(self.map)
}
}
impl<'a> From<QueryParams<'a>> for Vec<qiniu_http_client::QueryPair<'a>> {
#[inline]
fn from(map: QueryParams<'a>) -> Self {
map.build()
}
}
impl<'a> QueryParams<'a> {
#[inline]
#[must_use]
#[doc = "如果文件是通过分片上传的,是否返回对应的分片信息"]
pub fn set_need_parts_as_bool(self, value: bool) -> Self {
self.insert("needparts".into(), value.to_string().into())
}
}
#[derive(Clone, Debug, serde :: Serialize, serde :: Deserialize)]
#[serde(transparent)]
#[doc = "获取 API 所用的响应体参数"]
pub struct ResponseBody(serde_json::Value);
impl ResponseBody {
#[allow(dead_code)]
pub(crate) fn new(value: serde_json::Value) -> Self {
Self(value)
}
}
impl Default for ResponseBody {
#[inline]
fn default() -> Self {
Self(serde_json::Value::Object(Default::default()))
}
}
impl From<ResponseBody> for serde_json::Value {
#[inline]
fn from(val: ResponseBody) -> Self {
val.0
}
}
impl AsRef<serde_json::Value> for ResponseBody {
#[inline]
fn as_ref(&self) -> &serde_json::Value {
&self.0
}
}
impl AsMut<serde_json::Value> for ResponseBody {
#[inline]
fn as_mut(&mut self) -> &mut serde_json::Value {
&mut self.0
}
}
impl ResponseBody {
#[doc = "获取 对象大小,单位为字节"]
pub fn get_size_as_i64(&self) -> i64 {
self.0.as_object().unwrap().get("fsize").unwrap().as_i64().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 对象大小,单位为字节"]
pub fn set_size_as_i64(&mut self, new: i64) -> Option<i64> {
self.0
.as_object_mut()
.unwrap()
.insert("fsize".to_owned(), new.into())
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "获取 对象大小,单位为字节"]
pub fn get_size_as_u64(&self) -> u64 {
self.0.as_object().unwrap().get("fsize").unwrap().as_u64().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 对象大小,单位为字节"]
pub fn set_size_as_u64(&mut self, new: u64) -> Option<u64> {
self.0
.as_object_mut()
.unwrap()
.insert("fsize".to_owned(), new.into())
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "获取 对象哈希值"]
pub fn get_hash_as_str(&self) -> &str {
self.0.as_object().unwrap().get("hash").unwrap().as_str().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 对象哈希值"]
pub fn set_hash_as_str(&mut self, new: String) -> Option<String> {
self.0
.as_object_mut()
.unwrap()
.insert("hash".to_owned(), new.into())
.and_then(|val| match val {
serde_json::Value::String(s) => Some(s),
_ => None,
})
}
}
impl ResponseBody {
#[doc = "获取 对象 MIME 类型"]
pub fn get_mime_type_as_str(&self) -> &str {
self.0.as_object().unwrap().get("mimeType").unwrap().as_str().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 对象 MIME 类型"]
pub fn set_mime_type_as_str(&mut self, new: String) -> Option<String> {
self.0
.as_object_mut()
.unwrap()
.insert("mimeType".to_owned(), new.into())
.and_then(|val| match val {
serde_json::Value::String(s) => Some(s),
_ => None,
})
}
}
impl ResponseBody {
#[doc = "获取 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储"]
pub fn get_type_as_i64(&self) -> i64 {
self.0.as_object().unwrap().get("type").unwrap().as_i64().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储"]
pub fn set_type_as_i64(&mut self, new: i64) -> Option<i64> {
self.0
.as_object_mut()
.unwrap()
.insert("type".to_owned(), new.into())
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "获取 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储"]
pub fn get_type_as_u64(&self) -> u64 {
self.0.as_object().unwrap().get("type").unwrap().as_u64().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储"]
pub fn set_type_as_u64(&mut self, new: u64) -> Option<u64> {
self.0
.as_object_mut()
.unwrap()
.insert("type".to_owned(), new.into())
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "获取 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒"]
pub fn get_put_time_as_i64(&self) -> i64 {
self.0.as_object().unwrap().get("putTime").unwrap().as_i64().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒"]
pub fn set_put_time_as_i64(&mut self, new: i64) -> Option<i64> {
self.0
.as_object_mut()
.unwrap()
.insert("putTime".to_owned(), new.into())
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "获取 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒"]
pub fn get_put_time_as_u64(&self) -> u64 {
self.0.as_object().unwrap().get("putTime").unwrap().as_u64().unwrap()
}
}
impl ResponseBody {
#[doc = "设置 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒"]
pub fn set_put_time_as_u64(&mut self, new: u64) -> Option<u64> {
self.0
.as_object_mut()
.unwrap()
.insert("putTime".to_owned(), new.into())
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "获取 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段"]
pub fn get_unfreezing_status_as_i64(&self) -> Option<i64> {
self.0
.as_object()
.and_then(|obj| obj.get("restoreStatus"))
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "设置 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段"]
pub fn set_unfreezing_status_as_i64(&mut self, new: i64) -> Option<i64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("restoreStatus".to_owned(), new.into())
.and_then(|val| val.as_i64())
})
}
}
impl ResponseBody {
#[doc = "获取 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段"]
pub fn get_unfreezing_status_as_u64(&self) -> Option<u64> {
self.0
.as_object()
.and_then(|obj| obj.get("restoreStatus"))
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "设置 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段"]
pub fn set_unfreezing_status_as_u64(&mut self, new: u64) -> Option<u64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("restoreStatus".to_owned(), new.into())
.and_then(|val| val.as_u64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段"]
pub fn get_status_as_i64(&self) -> Option<i64> {
self.0
.as_object()
.and_then(|obj| obj.get("status"))
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "设置 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段"]
pub fn set_status_as_i64(&mut self, new: i64) -> Option<i64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("status".to_owned(), new.into())
.and_then(|val| val.as_i64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段"]
pub fn get_status_as_u64(&self) -> Option<u64> {
self.0
.as_object()
.and_then(|obj| obj.get("status"))
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "设置 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段"]
pub fn set_status_as_u64(&mut self, new: u64) -> Option<u64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("status".to_owned(), new.into())
.and_then(|val| val.as_u64())
})
}
}
impl ResponseBody {
#[doc = "获取 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回"]
pub fn get_md_5_as_str(&self) -> Option<&str> {
self.0
.as_object()
.and_then(|obj| obj.get("md5"))
.and_then(|val| val.as_str())
}
}
impl ResponseBody {
#[doc = "设置 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回"]
pub fn set_md_5_as_str(&mut self, new: String) -> Option<String> {
self.0.as_object_mut().and_then(|object| {
object.insert("md5".to_owned(), new.into()).and_then(|val| match val {
serde_json::Value::String(s) => Some(s),
_ => None,
})
})
}
}
impl ResponseBody {
#[doc = "获取 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段"]
pub fn get_expiration_time_as_i64(&self) -> Option<i64> {
self.0
.as_object()
.and_then(|obj| obj.get("expiration"))
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "设置 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段"]
pub fn set_expiration_time_as_i64(&mut self, new: i64) -> Option<i64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("expiration".to_owned(), new.into())
.and_then(|val| val.as_i64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段"]
pub fn get_expiration_time_as_u64(&self) -> Option<u64> {
self.0
.as_object()
.and_then(|obj| obj.get("expiration"))
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "设置 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段"]
pub fn set_expiration_time_as_u64(&mut self, new: u64) -> Option<u64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("expiration".to_owned(), new.into())
.and_then(|val| val.as_u64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段"]
pub fn get_transition_to_ia_time_as_i64(&self) -> Option<i64> {
self.0
.as_object()
.and_then(|obj| obj.get("transitionToIA"))
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "设置 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段"]
pub fn set_transition_to_ia_time_as_i64(&mut self, new: i64) -> Option<i64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("transitionToIA".to_owned(), new.into())
.and_then(|val| val.as_i64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段"]
pub fn get_transition_to_ia_time_as_u64(&self) -> Option<u64> {
self.0
.as_object()
.and_then(|obj| obj.get("transitionToIA"))
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "设置 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段"]
pub fn set_transition_to_ia_time_as_u64(&mut self, new: u64) -> Option<u64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("transitionToIA".to_owned(), new.into())
.and_then(|val| val.as_u64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段"]
pub fn get_transition_to_archive_time_as_i64(&self) -> Option<i64> {
self.0
.as_object()
.and_then(|obj| obj.get("transitionToARCHIVE"))
.and_then(|val| val.as_i64())
}
}
impl ResponseBody {
#[doc = "设置 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段"]
pub fn set_transition_to_archive_time_as_i64(&mut self, new: i64) -> Option<i64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("transitionToARCHIVE".to_owned(), new.into())
.and_then(|val| val.as_i64())
})
}
}
impl ResponseBody {
#[doc = "获取 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段"]
pub fn get_transition_to_archive_time_as_u64(&self) -> Option<u64> {
self.0
.as_object()
.and_then(|obj| obj.get("transitionToARCHIVE"))
.and_then(|val| val.as_u64())
}
}
impl ResponseBody {
#[doc = "设置 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段"]
pub fn set_transition_to_archive_time_as_u64(&mut self, new: u64) -> Option<u64> {
self.0.as_object_mut().and_then(|object| {
object
.insert("transitionToARCHIVE".to_owned(), new.into())
.and_then(|val| val.as_u64())
})
}
}
#[derive(Clone, Debug, serde :: Serialize, serde :: Deserialize)]
#[serde(transparent)]
#[doc = "每个分片的大小"]
pub struct PartSizes(serde_json::Value);
impl PartSizes {
#[allow(dead_code)]
pub(crate) fn new(value: serde_json::Value) -> Self {
Self(value)
}
}
impl Default for PartSizes {
#[inline]
fn default() -> Self {
Self(serde_json::Value::Array(Default::default()))
}
}
impl From<PartSizes> for serde_json::Value {
#[inline]
fn from(val: PartSizes) -> Self {
val.0
}
}
impl AsRef<serde_json::Value> for PartSizes {
#[inline]
fn as_ref(&self) -> &serde_json::Value {
&self.0
}
}
impl AsMut<serde_json::Value> for PartSizes {
#[inline]
fn as_mut(&mut self) -> &mut serde_json::Value {
&mut self.0
}
}
impl PartSizes {
#[doc = "获取数组的长度"]
pub fn len(&self) -> usize {
self.0.as_array().unwrap().len()
}
#[doc = "数组是否为空"]
pub fn is_empty(&self) -> bool {
self.0.as_array().unwrap().is_empty()
}
}
impl PartSizes {
#[doc = "解析 JSON 得到整型列表"]
pub fn to_i64_vec(&self) -> Vec<i64> {
self.0
.as_array()
.unwrap()
.iter()
.map(|ele| ele.as_i64().unwrap())
.collect()
}
}
impl PartSizes {
#[doc = "解析 JSON 得到无符号整型列表"]
pub fn to_u64_vec(&self) -> Vec<u64> {
self.0
.as_array()
.unwrap()
.iter()
.map(|ele| ele.as_u64().unwrap())
.collect()
}
}
impl PartSizes {
#[doc = "在列表的指定位置移出 JSON i64 整型"]
pub fn remove_as_i64(&mut self, index: usize) -> Option<i64> {
match self.0.as_array_mut().unwrap().remove(index) {
serde_json::Value::Number(s) => s.as_i64(),
_ => None,
}
}
}
impl PartSizes {
#[doc = "在列表尾部取出 JSON i64 整型"]
pub fn pop_as_i64(&mut self) -> Option<i64> {
self.0.as_array_mut().unwrap().pop().and_then(|val| match val {
serde_json::Value::Number(s) => s.as_i64(),
_ => None,
})
}
}
impl PartSizes {
#[doc = "在列表的指定位置移出 JSON u64 整型"]
pub fn remove_as_u64(&mut self, index: usize) -> Option<u64> {
match self.0.as_array_mut().unwrap().remove(index) {
serde_json::Value::Number(s) => s.as_u64(),
_ => None,
}
}
}
impl PartSizes {
#[doc = "在列表尾部取出 JSON u64 整型"]
pub fn pop_as_u64(&mut self) -> Option<u64> {
self.0.as_array_mut().unwrap().pop().and_then(|val| match val {
serde_json::Value::Number(s) => s.as_u64(),
_ => None,
})
}
}
impl From<Vec<i8>> for PartSizes {
#[inline]
fn from(val: Vec<i8>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON i8 整型"]
pub fn insert_i8(&mut self, index: usize, val: i8) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON i8 整型"]
pub fn push_i8(&mut self, val: i8) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<i16>> for PartSizes {
#[inline]
fn from(val: Vec<i16>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON i16 整型"]
pub fn insert_i16(&mut self, index: usize, val: i16) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON i16 整型"]
pub fn push_i16(&mut self, val: i16) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<i32>> for PartSizes {
#[inline]
fn from(val: Vec<i32>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON i32 整型"]
pub fn insert_i32(&mut self, index: usize, val: i32) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON i32 整型"]
pub fn push_i32(&mut self, val: i32) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<i64>> for PartSizes {
#[inline]
fn from(val: Vec<i64>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON i64 整型"]
pub fn insert_i64(&mut self, index: usize, val: i64) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON i64 整型"]
pub fn push_i64(&mut self, val: i64) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<isize>> for PartSizes {
#[inline]
fn from(val: Vec<isize>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON isize 整型"]
pub fn insert_isize(&mut self, index: usize, val: isize) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON isize 整型"]
pub fn push_isize(&mut self, val: isize) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<u8>> for PartSizes {
#[inline]
fn from(val: Vec<u8>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON u8 整型"]
pub fn insert_u8(&mut self, index: usize, val: u8) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON u8 整型"]
pub fn push_u8(&mut self, val: u8) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<u16>> for PartSizes {
#[inline]
fn from(val: Vec<u16>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON u16 整型"]
pub fn insert_u16(&mut self, index: usize, val: u16) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON u16 整型"]
pub fn push_u16(&mut self, val: u16) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<u32>> for PartSizes {
#[inline]
fn from(val: Vec<u32>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON u32 整型"]
pub fn insert_u32(&mut self, index: usize, val: u32) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON u32 整型"]
pub fn push_u32(&mut self, val: u32) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<u64>> for PartSizes {
#[inline]
fn from(val: Vec<u64>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON u64 整型"]
pub fn insert_u64(&mut self, index: usize, val: u64) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON u64 整型"]
pub fn push_u64(&mut self, val: u64) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl From<Vec<usize>> for PartSizes {
#[inline]
fn from(val: Vec<usize>) -> Self {
Self(serde_json::Value::from(val))
}
}
impl PartSizes {
#[doc = "在列表的指定位置插入 JSON usize 整型"]
pub fn insert_usize(&mut self, index: usize, val: usize) {
self.0.as_array_mut().unwrap().insert(index, val.into());
}
}
impl PartSizes {
#[doc = "在列表尾部追加 JSON usize 整型"]
pub fn push_usize(&mut self, val: usize) {
self.0.as_array_mut().unwrap().push(val.into());
}
}
impl ResponseBody {
#[doc = "获取 每个分片的大小,如没有指定 need_parts 参数则不返回"]
pub fn get_parts(&self) -> Option<PartSizes> {
self.0
.as_object()
.and_then(|obj| obj.get("parts"))
.cloned()
.map(PartSizes::new)
}
}
impl ResponseBody {
#[doc = "设置 每个分片的大小,如没有指定 need_parts 参数则不返回"]
pub fn set_parts(&mut self, new: PartSizes) -> Option<PartSizes> {
self.0
.as_object_mut()
.and_then(|object| object.insert("parts".to_owned(), new.into()).map(PartSizes::new))
}
}
#[doc = "API 调用客户端"]
#[derive(Debug, Clone)]
pub struct Client<'client>(&'client qiniu_http_client::HttpClient);
impl<'client> Client<'client> {
pub(super) fn new(http_client: &'client qiniu_http_client::HttpClient) -> Self {
Self(http_client)
}
}
impl<'client> Client<'client> {
#[inline]
#[doc = "创建一个新的阻塞请求,该方法的异步版本为 [`Self::new_async_request`]"]
pub fn new_request<E: qiniu_http_client::EndpointsProvider + 'client>(
&self,
endpoints_provider: E,
path_params: PathParams,
credential: impl qiniu_http_client::credential::CredentialProvider + Clone + 'client,
) -> SyncRequestBuilder<'client, E> {
RequestBuilder({
let mut builder = self.0.get(&[qiniu_http_client::ServiceName::Rs], endpoints_provider);
builder.authorization(qiniu_http_client::Authorization::v2(credential));
builder.idempotent(qiniu_http_client::Idempotent::Default);
builder.path(crate::base_utils::join_path("/stat", "", path_params.build()));
builder.accept_json();
builder
})
}
#[inline]
#[cfg(feature = "async")]
#[doc = "创建一个新的异步请求"]
pub fn new_async_request<E: qiniu_http_client::EndpointsProvider + 'client>(
&self,
endpoints_provider: E,
path_params: PathParams,
credential: impl qiniu_http_client::credential::CredentialProvider + Clone + 'client,
) -> AsyncRequestBuilder<'client, E> {
RequestBuilder({
let mut builder = self
.0
.async_get(&[qiniu_http_client::ServiceName::Rs], endpoints_provider);
builder.authorization(qiniu_http_client::Authorization::v2(credential));
builder.idempotent(qiniu_http_client::Idempotent::Default);
builder.path(crate::base_utils::join_path("/stat", "", path_params.build()));
builder.accept_json();
builder
})
}
}
#[derive(Debug)]
#[doc = "API 请求构造器"]
pub struct RequestBuilder<'req, B, E>(qiniu_http_client::RequestBuilder<'req, B, E>);
#[doc = "API 阻塞请求构造器"]
pub type SyncRequestBuilder<'req, E> = RequestBuilder<'req, qiniu_http_client::SyncRequestBody<'req>, E>;
#[cfg(feature = "async")]
#[cfg_attr(feature = "docs", doc(cfg(feature = "async")))]
#[doc = "API 异步请求构造器"]
pub type AsyncRequestBuilder<'req, E> = RequestBuilder<'req, qiniu_http_client::AsyncRequestBody<'req>, E>;
impl<'req, B, E> RequestBuilder<'req, B, E> {
#[inline]
#[doc = "设置是否使用 HTTPS"]
pub fn use_https(&mut self, use_https: bool) -> &mut Self {
self.0.use_https(use_https);
self
}
#[inline]
#[doc = "设置 HTTP 协议版本"]
pub fn version(&mut self, version: qiniu_http_client::http::Version) -> &mut Self {
self.0.version(version);
self
}
#[inline]
#[doc = "设置 HTTP 请求头"]
pub fn headers(
&mut self,
headers: impl Into<std::borrow::Cow<'req, qiniu_http_client::http::HeaderMap>>,
) -> &mut Self {
self.0.headers(headers);
self
}
#[inline]
#[doc = "添加 HTTP 请求头"]
pub fn set_header(
&mut self,
header_name: impl qiniu_http_client::http::header::IntoHeaderName,
header_value: impl Into<qiniu_http_client::http::HeaderValue>,
) -> &mut Self {
self.0.set_header(header_name, header_value);
self
}
#[inline]
#[doc = "设置查询参数"]
pub fn query(&mut self, query: impl Into<std::borrow::Cow<'req, str>>) -> &mut Self {
self.0.query(query);
self
}
#[inline]
#[doc = "设置查询参数"]
pub fn query_pairs(&mut self, query_pairs: impl Into<Vec<qiniu_http_client::QueryPair<'req>>>) -> &mut Self {
self.0.query_pairs(query_pairs);
self
}
#[inline]
#[doc = "追加查询参数"]
pub fn append_query_pair(
&mut self,
query_pair_key: impl Into<qiniu_http_client::QueryPairKey<'req>>,
query_pair_value: impl Into<qiniu_http_client::QueryPairValue<'req>>,
) -> &mut Self {
self.0.append_query_pair(query_pair_key, query_pair_value);
self
}
#[inline]
#[doc = "设置扩展信息"]
pub fn extensions(&mut self, extensions: qiniu_http_client::http::Extensions) -> &mut Self {
self.0.extensions(extensions);
self
}
#[doc = "添加扩展信息"]
#[inline]
pub fn add_extension<T: Send + Sync + 'static>(&mut self, val: T) -> &mut Self {
self.0.add_extension(val);
self
}
#[inline]
#[doc = "上传进度回调函数"]
pub fn on_uploading_progress(
&mut self,
callback: impl Fn(
&dyn qiniu_http_client::SimplifiedCallbackContext,
&qiniu_http_client::http::TransferProgressInfo,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_uploading_progress(callback);
self
}
#[inline]
#[doc = "设置响应状态码回调函数"]
pub fn on_receive_response_status(
&mut self,
callback: impl Fn(
&dyn qiniu_http_client::SimplifiedCallbackContext,
qiniu_http_client::http::StatusCode,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_receive_response_status(callback);
self
}
#[inline]
#[doc = "设置响应 HTTP 头回调函数"]
pub fn on_receive_response_header(
&mut self,
callback: impl Fn(
&dyn qiniu_http_client::SimplifiedCallbackContext,
&qiniu_http_client::http::HeaderName,
&qiniu_http_client::http::HeaderValue,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_receive_response_header(callback);
self
}
#[inline]
#[doc = "设置域名解析前回调函数"]
pub fn on_to_resolve_domain(
&mut self,
callback: impl Fn(&mut dyn qiniu_http_client::CallbackContext, &str) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_to_resolve_domain(callback);
self
}
#[inline]
#[doc = "设置域名解析成功回调函数"]
pub fn on_domain_resolved(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::CallbackContext,
&str,
&qiniu_http_client::ResolveAnswers,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_domain_resolved(callback);
self
}
#[inline]
#[doc = "设置 IP 地址选择前回调函数"]
pub fn on_to_choose_ips(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::CallbackContext,
&[qiniu_http_client::IpAddrWithPort],
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_to_choose_ips(callback);
self
}
#[inline]
#[doc = "设置 IP 地址选择成功回调函数"]
pub fn on_ips_chosen(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::CallbackContext,
&[qiniu_http_client::IpAddrWithPort],
&[qiniu_http_client::IpAddrWithPort],
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_ips_chosen(callback);
self
}
#[inline]
#[doc = "设置 HTTP 请求签名前回调函数"]
pub fn on_before_request_signed(
&mut self,
callback: impl Fn(&mut dyn qiniu_http_client::ExtendedCallbackContext) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_before_request_signed(callback);
self
}
#[inline]
#[doc = "设置 HTTP 请求前回调函数"]
pub fn on_after_request_signed(
&mut self,
callback: impl Fn(&mut dyn qiniu_http_client::ExtendedCallbackContext) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
        + 'req,
    ) -> &mut Self {
        self.0.on_after_request_signed(callback);
self
}
#[inline]
#[doc = "设置响应成功回调函数"]
pub fn on_response(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::ExtendedCallbackContext,
&qiniu_http_client::http::ResponseParts,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_response(callback);
self
}
#[inline]
#[doc = "设置响应错误回调函数"]
pub fn on_error(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::ExtendedCallbackContext,
&qiniu_http_client::ResponseError,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_error(callback);
self
}
#[inline]
#[doc = "设置退避前回调函数"]
pub fn on_before_backoff(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::ExtendedCallbackContext,
std::time::Duration,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_before_backoff(callback);
self
}
#[inline]
#[doc = "设置退避后回调函数"]
pub fn on_after_backoff(
&mut self,
callback: impl Fn(
&mut dyn qiniu_http_client::ExtendedCallbackContext,
std::time::Duration,
) -> qiniu_http_client::CallbackResult
+ Send
+ Sync
+ 'req,
) -> &mut Self {
self.0.on_after_backoff(callback);
self
}
#[inline]
#[doc = "获取 HTTP 请求构建器部分参数"]
pub fn parts(&self) -> &qiniu_http_client::RequestBuilderParts<'req> {
self.0.parts()
}
#[inline]
#[doc = "获取 HTTP 请求构建器部分参数的可变引用"]
pub fn parts_mut(&mut self) -> &mut qiniu_http_client::RequestBuilderParts<'req> {
self.0.parts_mut()
}
}
impl<'req, E: qiniu_http_client::EndpointsProvider + Clone + 'req> SyncRequestBuilder<'req, E> {
#[doc = "阻塞发起 HTTP 请求"]
pub fn call(&mut self) -> qiniu_http_client::ApiResult<qiniu_http_client::Response<ResponseBody>> {
let request = &mut self.0;
let response = request.call()?;
let parsed = response.parse_json()?;
Ok(parsed)
}
}
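// End-to-end sketch (the endpoints provider and credential are whatever the
// caller already holds; error handling elided):
//
//     let response = client
//         .new_request(endpoints_provider, PathParams::default().set_entry_as_str("bucket:key"), credential)
//         .call()?;
//     // the parsed ResponseBody exposes typed getters, e.g. get_size_as_u64()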
#[cfg(feature = "async")]
impl<'req, E: qiniu_http_client::EndpointsProvider + Clone + 'req> AsyncRequestBuilder<'req, E> {
#[doc = "异步发起 HTTP 请求"]
pub async fn call(&mut self) -> qiniu_http_client::ApiResult<qiniu_http_client::Response<ResponseBody>> {
let request = &mut self.0;
let response = request.call().await?;
let parsed = response.parse_json().await?;
Ok(parsed)
    }
}
utils.py | import logging
from typing import Optional
import requests
from mtp_common import nomis
from prison.models import Prison, PrisonerLocation
logger = logging.getLogger('mtp')
def fetch_prisoner_location_from_nomis(prisoner_location: PrisonerLocation) -> Optional[PrisonerLocation]:
new_location = None
try:
new_location = nomis.get_location(prisoner_location.prisoner_number)
if not new_location:
logger.error(
'Malformed response from NOMIS when looking up prisoner location for %(prisoner_number)s',
{'prisoner_number': prisoner_location.prisoner_number}
)
return None
new_prison = Prison.objects.get(nomis_id=new_location['nomis_id'])
except requests.RequestException:
logger.error(
'Cannot look up prisoner location for %(prisoner_number)s in NOMIS',
{'prisoner_number': prisoner_location.prisoner_number}
)
return None
except Prison.DoesNotExist:
logger.error(
'Cannot find %(nomis_id)s in Prison table',
{'nomis_id': new_location['nomis_id']}
)
return None
else:
logger.info(
'Location fetched from nomis of %(prisoner_number)s is %(prison_nomis_id)s',
{
'prisoner_number': prisoner_location.prisoner_number,
'prison_nomis_id': new_prison.nomis_id,
}
)
# This update will only persist in python space. It is NOT committed to the database
# This is because we should be calling credit_prisons_need_updating on any update to PrisonerLocation and that
# takes too long to do synchronously off the back of a user-triggered API Request
prisoner_location.prison = new_prison
return prisoner_location
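# Usage sketch (the lookup value is a placeholder). Per the comment above,
# the returned object's new prison is NOT persisted by this helper.
#
#   location = PrisonerLocation.objects.get(prisoner_number='A1234BC')
#   refreshed = fetch_prisoner_location_from_nomis(location)
#   if refreshed is not None:
#       ...  # caller decides whether/how to save and re-run credit linking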
listener.rs | use serde::de::{Deserialize, Deserializer, Error as DeserializerError};
use std::str::FromStr;
use web3::types::H256;
use crate::components::EventProducer;
/// Deserialize an H256 hash (with or without '0x' prefix).
fn deserialize_h256<'de, D>(deserializer: D) -> Result<H256, D::Error>
where
D: Deserializer<'de>,
{
let s: String = Deserialize::deserialize(deserializer)?;
let block_hash = s.trim_start_matches("0x");
H256::from_str(block_hash).map_err(D::Error::custom)
}
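// For example, a field using this helper accepts both "0x" + 64 hex chars and
// the bare 64-hex-char form, producing the same H256; anything that is not
// valid hex surfaces as a custom serde error.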
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ChainHeadUpdate {
pub network_name: String,
#[serde(deserialize_with = "deserialize_h256")]
pub head_block_hash: H256,
pub head_block_number: u64,
}
pub trait ChainHeadUpdateListener: EventProducer<ChainHeadUpdate> {
/// Begin processing notifications coming in from Postgres.
fn start(&mut self);
}
PostContent.tsx | import React, {Component} from 'react';
import ReactDOM from 'react-dom';
import {
Modal,
ActivityIndicator,
Icon,
Flex,
Button,
Toast,
} from 'antd-mobile';
import { Pagination } from 'antd-mobile';
import {APost,Topic} from '../forum/UiDataModel';
import {OneTopic} from '../forum/Network';
import {PostContentItem} from './PostContentItem';
import './PostContent.scss'
const backStyle = {background: '#ECECEC'};
export class PostContentProps{
atopic:OneTopic;
}
class PostContentState{
topicInfo:Topic;
postsInfo:Array<APost>;
refreshing:boolean= false;
animating: boolean=false;
down: boolean=true;
height:number;
currentPage:number=1;
totalPage:number=1;
}
let currentClass:PostContent;
export class PostContent extends Component<PostContentProps,any> {
constructor(props) {
super(props);
console.log('topicId', this.props.atopic.mtopicInfo.id);
this.state={
height:document.documentElement.clientHeight,
totalPage:1,
currentPage:1,
postsInfo:[],
topicInfo: {},
animating:false,
refreshing: false,
};
currentClass=this;
}
errorToast(msg:any){
console.log('Loading Error: ', msg);
Toast.fail(msg, 2);
currentClass.setState({
// use currentClass: this method is passed as a bare callback, so "this" is
// unbound here and this.setState would throw a null-pointer error
animating: false,
});
}
componentDidMount() {
console.log('componentDidMount');
let page=1;
this.props.atopic.getPostsAndTopic(page,(topicinfo, posts) => {
document.title=topicinfo.title ;
this.setState({
postsInfo: posts,
topicInfo: topicinfo,
totalPage: topicinfo.totalPage,
});
},this.errorToast);
}
componentDidUpdate() {
console.log('componentDidUpdate');
//window.scrollTo(0, 0);
}
loadPage(targetPage:number) {
console.log('loadPage', targetPage);
this.props.atopic.getPostsAndTopic( targetPage, (topicinfo, posts) => {
this.setState({
postsInfo: posts,
topicInfo: topicinfo,
currentPage: targetPage,
refreshing: false,
animating: false,
});
window.scrollTo(0, 0);
},this.errorToast);
}
render() {
return (<div className='one-topic-body' style={backStyle}>
<div className='header-content-div'>
<Pagination className="custom-pagination-with-icon"
current={this.state.currentPage}
total={this.state.totalPage}
onChange={(i)=>{
console.log(i);
this.setState({animating:true});
this.loadPage(i);
}}
/>
</div>
<div className='main-content-div'>
{
this.state.postsInfo.map((item,index)=>(
<div>
<PostContentItem apost={item}/>
</div>
))
}
<ActivityIndicator
toast={true}
text="Loading..."
animating={this.state.animating}
          />
        </div>
        <Button className='go-btn' onClick={() => Modal.prompt('页面跳转', '',
          [
{text: 'Cancel'},
{
text: 'Submit',
onPress: value => new Promise((resolve) => {
let targetPage = Number(value);
if (targetPage && targetPage <= this.state.totalPage) {
this.setState({animating:true});
this.loadPage(targetPage);
console.log(`jump:${value}`);
resolve();
} else {
Toast.info('看起来不像是个能跳转的地方 >.<', 2);
}
}),
},
], 'default', null, ['要去哪一页呢'])}
>Go</Button>
<div className='footer-content-div'>
<Pagination className="custom-pagination-with-icon"
current={this.state.currentPage}
total={this.state.totalPage}
onChange={(i)=>{
console.log(i);
this.setState({animating:true});
this.loadPage(i);
}}
/>
</div>
</div>);
}
}
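// Usage sketch (assumes an OneTopic instance built elsewhere from a topic id;
// its construction is not shown in this file):
//
//   <PostContent atopic={oneTopic} />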
const pageLocal={
prevText: (<span className="arrow-align"><Icon type="left"/>Prev</span>),
nextText: (<span className="arrow-align">Next<Icon type="right"/></span>),
};
const INDICATOR={ activate: 'release', deactivate: 'pull', release: 'loading', finish: 'finish' };
//http://www.cc98.org/topic/4748615
result.py | from __future__ import absolute_import
import json
from .model import OpenShiftPythonException
class Result(object):
    def __init__(self, high_level_operation, tracking_limit=None):
self.high_level_operation = high_level_operation
self.__actions = []
# if tracking_limit is less than 0 that means unlimited tracking_limit
if tracking_limit is not None and tracking_limit >= 0:
self.limit_tracking_actions = tracking_limit
else:
self.limit_tracking_actions = None
def actions(self):
my_list = [a for a in self.__actions if not a.internal]
return my_list
# Returns a bitwise OR of all underlying action statuses (if 0, all actions returned 0)
def status(self):
s = 0
for action in self.__actions:
# If not the last attempt, return status does not matter; errors ignored.
if action.last_attempt:
s |= int(action.status)
return s
# Returns aggregate stdout from all underlying actions
def out(self):
s = u''
for action in self.__actions:
if action.out:
s += action.out
if not s.endswith("\n"):
s += u'\n'
return s
def get_timeout(self):
"""
:return: Iterates through all actions in this Result and returns the first Action object
it finds that indicates it timed out. If no action timed out, returns None.
"""
for action in self.__actions:
if action.timeout:
return action
return None
# Returns aggregate stderr from all underlying actions
def err(self):
s = u''
for action in self.__actions:
if action.err:
s += action.err
if not s.endswith("\n"):
s += u'\n'
return s
def as_dict(self, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
m = {
"operation": self.high_level_operation,
"status": self.status(),
"actions": [action.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references,
redact_streams=redact_streams) for action in self.__actions]
}
return m
def as_json(self, indent=4, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
return json.dumps(
self.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references, redact_streams=redact_streams),
indent=indent)
def add_action(self, action):
self.__actions.append(action)
if self.limit_tracking_actions is not None and len(self.__actions) > self.limit_tracking_actions:
self.__actions.pop(0)
def add_result(self, result):
self.__actions.extend(result.__actions)
def __repr__(self):
return self.as_json()
def fail_if(self, msg):
if self.get_timeout():
msg += " (Timeout during: {})".format(self.get_timeout().as_dict()['cmd'])
if self.status() != 0:
raise OpenShiftPythonException(msg, self)
event.d.ts | import Breadcrumb from './breadcrumb'
import {
App,
Device,
Request,
Logger,
User,
Thread,
Stackframe,
FeatureFlag
} from './common'
declare class Event {
public static create(
maybeError: any,
tolerateNonErrors: boolean,
handledState: HandledState,
component: string,
errorFramesToSkip: number,
logger?: Logger
): Event
public app: App
public device: Device
public request: Request
public errors: Error[];
public breadcrumbs: Breadcrumb[]
public threads: Thread[]
public severity: 'info' | 'warning' | 'error'
public readonly originalError: any
public unhandled: boolean
public apiKey?: string
public context?: string
public groupingHash?: string
// user
public getUser(): User
public setUser(id?: string, email?: string, name?: string): void
// metadata
public addMetadata(section: string, values: { [key: string]: any }): void
public addMetadata(section: string, key: string, value: any): void
public getMetadata(section: string, key?: string): any
public clearMetadata(section: string, key?: string): void
// feature flags
public addFeatureFlag(name: string, variant?: string | null): void
public addFeatureFlags(featureFlags: FeatureFlag[]): void
public clearFeatureFlag(name: string): void
public clearFeatureFlags(): void
}
interface HandledState {
severity: string
unhandled: boolean
severityReason: {
type: string
[key: string]: any
}
}
export interface Error {
errorClass: string
errorMessage: string
stacktrace: Stackframe[]
type: string
}
export default Event
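// Usage sketch matching the declarations above (all values are illustrative):
//
//   const event = Event.create(new RangeError('bad index'), true,
//     { severity: 'warning', unhandled: false, severityReason: { type: 'handledException' } },
//     'notify()', 1)
//   event.addMetadata('request', 'route', '/checkout')
//   event.addFeatureFlag('new-checkout', 'treatment')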
tanh.py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tanh bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.bijectors import bijector
__all__ = [
"Tanh",
]
class Tanh(bijector.Bijector):
"""Bijector that computes `Y = tanh(X)`, therefore `Y in (-1, 1)`.
This can be achieved by an affine transform of the Sigmoid bijector, i.e.,
it is equivalent to
```
tfb.Chain([tfb.Affine(shift=-1, scale=2.),
tfb.Sigmoid(),
tfb.Affine(scale=2.)])
```
However, using the `Tanh` bijector directly is slightly faster and more
numerically stable.
"""
def __init__(self, validate_args=False, name="tanh"):
super(Tanh, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return tf.nn.tanh(x)
def _inverse(self, y):
return tf.atanh(y)
def _inverse_log_det_jacobian(self, y):
return -tf.log1p(-tf.square(y))
def _forward_log_det_jacobian(self, x):
# This formula is mathematically equivalent to
# `tf.log1p(-tf.square(tf.tanh(x)))`, however this code is more numerically
# stable.
# Derivation:
# log(1 - tanh(x)^2)
# = log(sech(x)^2)
# = 2 * log(sech(x))
# = 2 * log(2e^-x / (e^-2x + 1))
# = 2 * (log(2) - x - log(e^-2x + 1))
# = 2 * (log(2) - x - softplus(-2x))
    return 2. * (np.log(2.) - x - tf.nn.softplus(-2. * x))
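# Usage sketch (TF1-era API, matching this file's imports; values illustrative):
#
#   b = Tanh()
#   y = b.forward(0.5)                                  # tanh(0.5) ~ 0.4621
#   x = b.inverse(y)                                    # recovers ~0.5
#   ildj = b.inverse_log_det_jacobian(y, event_ndims=0)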
fixtures_test.go | package tombstone_test
import (
"database/sql/driver"
"github.com/kyma-incubator/compass/components/director/internal/domain/tombstone"
"github.com/kyma-incubator/compass/components/director/internal/model"
)
const (
tenantID = "tenantID"
appID = "appID"
ordID = "com.compass.v1"
externalTenantID = "externalTenantID"
)
func fixEntityTombstone() *tombstone.Entity {
return &tombstone.Entity{
OrdID: ordID,
TenantID: tenantID,
ApplicationID: appID,
RemovalDate: "removalDate",
}
}
func fixTombstoneModel() *model.Tombstone {
return &model.Tombstone{
OrdID: ordID,
TenantID: tenantID,
ApplicationID: appID,
RemovalDate: "removalDate",
}
}
func fixTombstoneModelInput() *model.TombstoneInput {
return &model.TombstoneInput{
OrdID: ordID,
RemovalDate: "removalDate",
}
}
func fixTombstoneColumns() []string {
return []string{"ord_id", "tenant_id", "app_id", "removal_date"}
}
func fixTombstoneRow() []driver.Value {
return []driver.Value{ordID, tenantID, appID, "removalDate"}
}
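// Sketch of how these fixtures typically feed a sqlmock-based repository test
// (the sqlmock wiring is illustrative, not part of this file):
//
//	rows := sqlmock.NewRows(fixTombstoneColumns()).AddRow(fixTombstoneRow()...)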
func fixTombstoneUpdateArgs() []driver.Value {
	return []driver.Value{"removalDate"}
}
interpolate.py | import numpy as np
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
import clusterbuster.mathut as math
"""
Start with e.g. InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(10,6))
"""
# from http://stackoverflow.com/questions/5328128/scipy-interpolation-of-large-matrix
def my_interp(X, Y, Z, x, y, spn=3):
xs,ys = map(np.array,(x,y))
z = np.zeros(xs.shape)
for i,(x,y) in enumerate(zip(xs,ys)):
# get the indices of the nearest x,y
xi = np.argmin(np.abs(X[0,:]-x))
yi = np.argmin(np.abs(Y[:,0]-y))
xlo = max(xi-spn, 0)
ylo = max(yi-spn, 0)
xhi = min(xi+spn, X[0,:].size)
yhi = min(yi+spn, Y[:,0].size)
# make slices of X,Y,Z that are only a few items wide
nX = X[xlo:xhi, ylo:yhi]
nY = Y[xlo:xhi, ylo:yhi]
nZ = Z[xlo:xhi, ylo:yhi]
intp = interpolate.interp2d(nX, nY, nZ)
z[i] = intp(x,y)[0]
return z
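# Usage sketch (synthetic grid; spn sets the half-width of the local window
# around the nearest grid node):
#
#   X, Y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 40))
#   Z = np.sin(X) * np.cos(Y)
#   z = my_interp(X, Y, Z, x=[0.3, 0.7], y=[0.2, 0.9])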
# from here on: done by myself
def LoadFile_psi(psiFile):
""" Just gives the Mach number and Temperature values """
#=== FILE A ===#
# read first line .... split it and convert sstring to float science float('1.31E+01') or for a list:map(float, ['3.76E+00', '1.31E+01', '1.14E+01'])
with open(psiFile, 'r') as f:
first_line = f.readline()
psi_x = first_line.split()[2:] # Splits into list without first two elements
psi_x = np.asarray( [float(i) for i in psi_x ] ) # Converts strings to floats # Converts strings to floats
psi_y = np.loadtxt(psiFile,skiprows=0)[:,0]
return psi_x, psi_y
def InterpolateRadio2D(psiFile='../Analysis_MUSIC2/Hoeft_radio/mach_psi_table.txt', machFile='../Analysis_MUSIC2/Hoeft_radio/q_mach_machr_table.txt', saveplot='../Analysis_MUSIC2/Hoeft_radio/interpolated', psiFileNew = False, machFileNew = False, inter=(10,3)):
# Currently the mach number is interpolated in an logarithmic space which is much sparser at lower mach numbers then anticipated
# I suspect an double-exponential function for mach (both efficiency dependency stepsize)
# Note that the original grid given in 'Hoeft_radio/mach_psi_table.txt' is (quite) regular in log-loglog space, which makes it very simple to invoke an interpolation function!
# Irregular data points would make it nececcary to use functions like scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='cubic')
plot_old = False
plot_new = False
plot_PhD = True
##==== psiFile for psi factor; machfile for mach-numbers conversion factors
H_mach = np.loadtxt(machFile,skiprows=0)
H_psi = np.loadtxt(psiFile,skiprows=0)[:,1::] # you wont get the temperature values ... read them separetely
psi_x,psi_y = LoadFile_psi(psiFile)
psi_x = np.log10( psi_x ) # converts to and log10 space
psi_y = np.log10(np.log10( psi_y )) # converts to and log10(log10) space
X, Y = np.meshgrid(psi_x, psi_y)
Z = np.log10(H_psi)
#interp_spline = interpolate.interp2d(x, y, Z) #, kind='cubic'
interp_spline = interpolate.RectBivariateSpline(psi_y, psi_x, Z) #, bbox=[None, None, None, None], kx=3, ky=3, s=0
xnew = np.arange(psi_x[0], psi_x[-1], (psi_x[-1]-psi_x[0])/(len(psi_x)*inter[0]) ) #np.arange(-4, 2, 4e-2) #
ynew = np.arange(psi_y[0], psi_y[-1], (psi_y[-1]-psi_y[0])/(len(psi_y)*inter[1]) ) #np.arange(0.2, 3, 2e-2) #
Znew = interp_spline(ynew, xnew )
keV2K = 1.16e7 # Translates keV to Kelvin
if plot_old:
plt.plot( np.arange(0, len(psi_x), 1 ), psi_x )
plt.plot( np.arange(0, len(psi_y), 1 ), psi_y )
plt.savefig(saveplot + '_linearity.png')
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.pcolor( np.log10(keV2K) + psi_x, psi_y, Z)
ax1.set_title("Sparsely sampled function")
ax1.set_xlim([3.1, 9])
ax1.set_ylim([psi_y[0], 0.5])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$\\mathrm{log_{10}(log_{10}(M))\\,[]}$')
ax2 = plt.subplot(122)
im2 = ax2.pcolor( np.log10(keV2K) + xnew, ynew, Znew)
ax2.set_title("Interpolated function")
ax2.set_xlim([3.1, 9])
ax2.set_ylim([psi_y[0], 0.5])
ax2.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax2.set_yticklabels([])
mach = [1.5,2.2,3.0,10.0]
c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]
for ii,m in enumerate(mach): | ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im2, cax=cbar_ax)
plt.savefig(saveplot + '.png')
if plot_new:
fig = plt.figure()
ax1 = plt.subplot(111)
im2 = ax1.pcolor( np.log10(keV2K) + xnew, ynew, Znew, vmin=-8)
# ax1.set_title("Interpolated function")
ax1.set_xlim([7, 8.4])
ax1.set_ylim([np.log10(np.log10(1.7)), np.log10(np.log10(10.))])
ax1.set_xlabel('$\\mathrm{log_{10}(T)\\,[K]}$ ')
ax1.set_ylabel('$M$ ')
y_ticks = [np.log10(np.log10(m)) for m in [1.7,2.5,4,10]]
print( ['%.2e' % (y) for y in y_ticks], [10**(10**y) for y in y_ticks] )
ax1.set_yticklabels([10**(10**y) for y in y_ticks])
plt.yticks(y_ticks)
# temp = [1.5,2.2,3.0,10.0]
# c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]
# for ii,m in enumerate(mach):
# ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
# ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 )
#
# ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
# ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im2, cax=cbar_ax, label='$\log_{10}\Phi$',)
plt.savefig(saveplot + '_DSA.pdf')
plt.savefig(saveplot + '_DSA.png', dpi=800)
if plot_PhD:
fig = plt.figure()
temp = np.linspace(2,20,20)
print(temp)
mach = np.linspace(2,7,300)
psi_x,psi_y = LoadFile_psi(psiFile)
import itertools
H,M,T = [],[],[]
for t in temp:
results_temp = math.find_closest(psi_x, t)
results_mach = math.find_closest(psi_y, mach) #
H.append(H_psi[results_mach,np.ones_like(results_mach)*results_temp])
M.append(mach)
T.append(np.ones_like(results_mach)*t)
H = list(itertools.chain.from_iterable(H))
M = list(itertools.chain.from_iterable(M))
T = list(itertools.chain.from_iterable(T))
plt.scatter(M,np.log10(H),c=T,alpha=0.1,s=5)
cb = plt.colorbar(label='Downstream Temperature [keV]')
cb.set_alpha(1)
cb.draw_all()
plt.xlabel('Mach number $M$')
plt.ylabel('$\log_{10}\,\Phi(M,T)$')
plt.savefig(saveplot + '_PhD.pdf')
plt.savefig(saveplot + '_PhD.png', dpi=800)
# Save File A
if psiFileNew:
location = psiFileNew
else:
location = psiFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) )
header = '# Mach'
for x in xnew:
header += '%13.4e' % (10**x)
mf = open(location,"w")
mf.write(header + '\n')
for ii,y in enumerate(ynew):
string = '%9.4f' % (10**(10**y)) + ''.join(['%13.4e' % (10**z) for z in Znew[ii][:]])
mf.write(string + '\n')
mf.close()
#=== FILE B ===#
Y_new = np.empty( (1,1) )
for ii,h in enumerate(H_mach.T):
interp_spline = interpolate.interp1d( 10**psi_y , h, kind='cubic')
if Y_new.shape[0] > 1:
Y_new = np.hstack( (Y_new, np.expand_dims(interp_spline( 10**ynew ), axis=1) ) )
else:
Y_new = np.expand_dims(interp_spline( 10**ynew ), axis=1)
# Save File B
if machFileNew:
location = machFileNew
else:
location = machFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) )
header = '# q M r M*(1-1/r) s'
mf = open(location,"w")
mf.write(header + '\n')
for ii,y in enumerate(10**ynew):
string = ''.join(['%14.6e' % (y) for y in Y_new[:][ii]]) # some numbers are very large and would need a good margin
mf.write(string + '\n')
mf.close()
return 0
if __name__ == "__main__":
InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(3,2)) #(90,27) | ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 ) |
pre_process.py | import numpy as np
from torchvision import transforms
import os
from PIL import Image, ImageOps
import numbers
import random
import torch
class ResizeImage():
def | (self, size):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
th, tw = self.size
return img.resize((th, tw))
class RandomSizedCrop(object):
"""Crop the given PIL.Image to random size and aspect ratio.
A crop of random size of (0.08 to 1.0) of the original size and a random
aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
h_off = random.randint(0, img.shape[1]-self.size)
w_off = random.randint(0, img.shape[2]-self.size)
img = img[:, h_off:h_off+self.size, w_off:w_off+self.size]
return img
class Normalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = channel - mean
Args:
mean (sequence): Sequence of means for R, G, B channels respectively.
"""
def __init__(self, mean=None, meanfile=None):
if mean:
self.mean = mean
else:
arr = np.load(meanfile)
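# The mean file appears to store a 0-255 BGR mean image (Caffe-style);
# scale it to [0, 1] and reorder the channels to RGB.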
self.mean = torch.from_numpy(arr.astype('float32')/255.0)[[2, 1, 0], :, :]
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
# TODO: make efficient
for t, m in zip(tensor, self.mean):
t.sub_(m)
return tensor
class PlaceCrop(object):
"""Crops the given PIL.Image at the particular index.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
class ForceFlip(object):
"""Horizontally flip the given PIL.Image randomly with a probability of 0.5."""
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Flipped image.
"""
return img.transpose(Image.FLIP_LEFT_RIGHT)
class CenterCrop(object):
"""Crops the given PIL.Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be cropped.
Returns:
PIL.Image: Cropped image.
"""
h, w = (img.shape[1], img.shape[2]) # (C, H, W) layout: shape[1] is height, shape[2] is width
th, tw = self.size
w_off = int((w - tw) / 2.)
h_off = int((h - th) / 2.)
img = img[:, h_off:h_off+th, w_off:w_off+tw]
return img
def image_train(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_target(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_test(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
start_first = 0
start_center = (resize_size - crop_size - 1) // 2
start_last = resize_size - crop_size - 1
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normalize
])
def image_test_10crop(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
start_first = 0
start_center = (resize_size - crop_size - 1) // 2
start_last = resize_size - crop_size - 1
data_transforms = [
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_first, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_last, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_last, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_first, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_center, start_center),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_first, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_last, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_last, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_first, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_center, start_center),
transforms.ToTensor(),
normalize
])
]
return data_transforms
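# Hypothetical sketch of how the ten crops above are typically consumed at
# evaluation time: run the model on every crop and average the outputs.
# crops = image_test_10crop()
# logits = torch.stack([model(t(img).unsqueeze(0)) for t in crops]).mean(0)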
| __init__ |
conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import sphinx_rtd_theme
import sys
import os
print("CURRENT WORKING DIRECTORY")
print(os.getcwd())
print('adding path')
sys.path.insert(0, r'C:\Users\ICN_admin\Documents\py_neuromodulation\pyneuromodulation')
print(sys.path) |
# At top of conf.py (with the other import statements)
import recommonmark
from recommonmark.transform import AutoStructify
from recommonmark.parser import CommonMarkParser
# -- Project information -----------------------------------------------------
project = 'py_neuromodulation'
copyright = '2021, Timon Merk'
author = 'Timon Merk'
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx_rtd_theme',
'sphinx.ext.napoleon',
'sphinx.ext.autosectionlabel',
'nbsphinx',
'recommonmark'
]
source_suffix = ['.rst', '.md', '.ipynb']
autosummary_generate = True
html_theme = 'sphinx_rtd_theme'
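# Note: AutoStructify is imported above but never registered. The usual
# recommonmark pattern (a sketch, not verified against this build) would be:
# def setup(app):
#     app.add_config_value('recommonmark_config', {'enable_auto_toc_tree': True}, True)
#     app.add_transform(AutoStructify)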
html_static_path = ['_static'] | |
move_battle_style_api.rs | /*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 20220523
*
* Generated by: https://openapi-generator.tech
*/
use reqwest;
use crate::apis::ResponseContent;
use super::{Error, configuration};
/// struct for typed errors of method [`move_battle_style_list`]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum MoveBattleStyleListError {
DefaultResponse(String),
UnknownValue(serde_json::Value),
}
/// struct for typed errors of method [`move_battle_style_read`]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum MoveBattleStyleReadError {
DefaultResponse(String),
UnknownValue(serde_json::Value),
}
pub async fn move_battle_style_list(configuration: &configuration::Configuration, limit: Option<i32>, offset: Option<i32>) -> Result<String, Error<MoveBattleStyleListError>> {
let local_var_configuration = configuration;
let local_var_client = &local_var_configuration.client;
let local_var_uri_str = format!("{}/api/v2/move-battle-style/", local_var_configuration.base_path);
let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str());
if let Some(ref local_var_str) = limit {
local_var_req_builder = local_var_req_builder.query(&[("limit", &local_var_str.to_string())]);
}
if let Some(ref local_var_str) = offset {
local_var_req_builder = local_var_req_builder.query(&[("offset", &local_var_str.to_string())]);
}
if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
}
let local_var_req = local_var_req_builder.build()?;
let local_var_resp = local_var_client.execute(local_var_req).await?;
let local_var_status = local_var_resp.status();
let local_var_content = local_var_resp.text().await?;
if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
serde_json::from_str(&local_var_content).map_err(Error::from)
} else {
let local_var_entity: Option<MoveBattleStyleListError> = serde_json::from_str(&local_var_content).ok();
let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
Err(Error::ResponseError(local_var_error))
}
}
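// Hypothetical usage sketch (assumes a default `Configuration` pointing at a
// PokeAPI-compatible server and an async runtime such as tokio):
// let cfg = configuration::Configuration::default();
// let page = move_battle_style_list(&cfg, Some(20), Some(0)).await?;
// println!("{}", page);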
pub async fn move_battle_style_read(configuration: &configuration::Configuration, id: i32) -> Result<String, Error<MoveBattleStyleReadError>> {
let local_var_configuration = configuration;
let local_var_client = &local_var_configuration.client;
let local_var_uri_str = format!("{}/api/v2/move-battle-style/{id}/", local_var_configuration.base_path, id=id);
let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str());
if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
}
let local_var_req = local_var_req_builder.build()?;
let local_var_resp = local_var_client.execute(local_var_req).await?;
let local_var_status = local_var_resp.status();
let local_var_content = local_var_resp.text().await?;
if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
serde_json::from_str(&local_var_content).map_err(Error::from)
} else { | }
} | let local_var_entity: Option<MoveBattleStyleReadError> = serde_json::from_str(&local_var_content).ok();
let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
Err(Error::ResponseError(local_var_error)) |
signature.rs | use super::{Atom, Context, Operator, Rule, Term, Variable, TRS};
use std::collections::HashMap;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::sync::{Arc, RwLock};
/// Records a universe of symbols.
///
/// Use [`Signature::default`] for a blank `Signature`, or [`Signature::new`] to initialize a
/// `Signature` with given [`Operator`]s.
///
/// [`Signature::default`]: #method.default
/// [`Signature::new`]: #method.new
/// [`Operator`]: struct.Operator.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{Signature, parse_term};
/// // Constructing a Signature using the default
/// let mut sig1 = Signature::default();
/// let a = sig1.new_op(2, Some("A".to_string()));
/// let b = sig1.new_op(0, Some("B".to_string()));
/// let c = sig1.new_op(0, Some("C".to_string()));
///
/// // Constructing a Signature using Signature::new
/// let mut sig2 = Signature::new(vec![
/// (2, Some("A".to_string())),
/// (0, Some("B".to_string())),
/// (0, Some("C".to_string())),
/// ]);
///
/// assert_eq!(sig1, sig2);
/// ```
#[derive(Clone)]
pub struct Signature {
pub(crate) sig: Arc<RwLock<Sig>>,
}
impl Signature {
/// Construct a `Signature` with the given [`Operator`]s.
///
/// Each [`Operator`] is specified in the form of `(arity, Some(name))` or
/// `(arity, None)`, where `arity` is the number of arguments a [`Term`] takes
/// (for example, an `arity` of 0 gives a "constant" [`Operator`]). A `name` for
/// the [`Operator`] is unnecessary, but may be supplied for more readable
/// formatting.
///
/// The returned vector of [`Operator`]s corresponds to the supplied spec.
///
/// [`Operator`]: struct.Operator.html
/// [`Term`]: struct.Term.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::Signature;
/// let mut sig = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
/// let ops = sig.operators();
///
/// let op_names: Vec<String> = ops.iter().map(|op| op.display()).collect();
/// assert_eq!(op_names, vec![".", "S", "K"]);
///
/// let mut sig2 = Signature::default();
/// let p = sig2.new_op(2, Some(".".to_string()));
/// let s = sig2.new_op(0, Some("S".to_string()));
/// let k = sig2.new_op(0, Some("K".to_string()));
///
/// assert_eq!(sig, sig2);
///
/// let mut sig = Signature::new(vec![]);
///
/// let mut sig2 = Signature::default();
///
/// assert_eq!(sig, sig2);
///```
pub fn new(operator_spec: Vec<(u32, Option<String>)>) -> Signature {
Signature {
sig: Arc::new(RwLock::new(Sig::new(operator_spec))),
}
}
/// Returns every [`Operator`] known to the `Signature`, in the order they were created.
///
/// [`Operator`]: struct.Operator.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::Signature;
/// let mut sig = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// let ops: Vec<String> = sig.operators().iter().map(|op| op.display()).collect();;
///
/// assert_eq!(ops, vec![".", "S", "K"]);
///```
pub fn operators(&self) -> Vec<Operator> {
self.sig
.read()
.expect("poisoned signature")
.operators()
.into_iter()
.map(|id| Operator {
id,
sig: self.clone(),
})
.collect()
}
/// Returns every [`Variable`] known to the `Signature`, in the order they were created.
///
/// [`Variable`]: struct.Variable.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{Signature, parse_term};
/// let mut sig = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// parse_term(&mut sig, "A(x_ y_)").expect("parse of A(x_ y_)");
///
/// let vars: Vec<String> = sig.variables().iter().map(|v| v.display()).collect();
///
/// assert_eq!(vars, vec!["x_", "y_"]);
///```
pub fn variables(&self) -> Vec<Variable> {
self.sig
.read()
.expect("poisoned signature")
.variables()
.into_iter()
.map(|id| Variable {
id,
sig: self.clone(),
})
.collect()
}
/// Returns every [`Atom`] known to the `Signature`.
///
/// [`Atom`]: enum.Atom.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{Signature, parse_term};
/// let mut sig = Signature::default();
///
/// parse_term(&mut sig, "A(x_ B(y_))").expect("parse of A(x_ B(y_))");
///
/// let atoms: Vec<String> = sig.atoms().iter().map(|a| a.display()).collect();
///
/// assert_eq!(atoms, vec!["x_", "y_", "B", "A"]);
/// ```
pub fn atoms(&self) -> Vec<Atom> {
let vars = self.variables().into_iter().map(Atom::Variable);
let ops = self.operators().into_iter().map(Atom::Operator);
vars.chain(ops).collect()
}
/// Create a new [`Operator`] distinct from all existing [`Operator`]s.
///
/// [`Operator`]: struct.Operator.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{Signature};
/// let mut sig = Signature::default();
///
/// let a = sig.new_op(1, Some(".".to_string()));
/// let s = sig.new_op(2, Some("S".to_string()));
/// let s2 = sig.new_op(2, Some("S".to_string()));
///
/// assert_ne!(a, s);
/// assert_ne!(a, s2);
/// assert_ne!(s, s2);
/// ```
pub fn new_op(&mut self, arity: u32, name: Option<String>) -> Operator {
let id = self
.sig
.write()
.expect("poisoned signature")
.new_op(arity, name);
Operator {
id,
sig: self.clone(),
}
}
/// Create a new [`Variable`] distinct from all existing [`Variable`]s.
///
/// [`Variable`]: struct.Variable.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{Signature};
/// let mut sig = Signature::default();
///
/// let z = sig.new_var(Some("z".to_string()));
/// let z2 = sig.new_var(Some("z".to_string()));
///
/// assert_ne!(z, z2);
/// ```
pub fn new_var(&mut self, name: Option<String>) -> Variable {
let id = self.sig.write().expect("poisoned signature").new_var(name);
Variable {
id,
sig: self.clone(),
}
}
/// Merge two `Signature`s. All [`Term`]s, [`Context`]s, [`Rule`]s, and [`TRS`]s associated
/// with the `other` `Signature` should be `reified` using methods provided
/// by the returned [`SignatureChange`].
///
/// [`Term`]: struct.Term.html
/// [`Context`]: struct.Context.html
/// [`Rule`]: struct.Rule.html
/// [`TRS`]: struct.TRS.html
/// [`SignatureChange`]: struct.SignatureChange.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{Signature, MergeStrategy};
/// // Merging 2 signatures by assuming all operators in the second are distinct from the first.
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// let mut sig2 = Signature::new(vec![
/// (2, Some("A".to_string())),
/// (1, Some("B".to_string())),
/// (0, Some("C".to_string())),
/// ]);
///
/// sig1.merge(&sig2, MergeStrategy::DistinctOperators);
///
/// let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
///
/// assert_eq!(ops, vec![".", "S", "K", "A", "B", "C"]);
///
/// // Merging 2 signatures by assuming all operators in the second are the same from the first.
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// let mut sig2 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// sig1.merge(&sig2, MergeStrategy::SameOperators);
///
/// let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
///
/// assert_eq!(ops, vec![".", "S", "K"]);
///
/// // Merging 2 signatures by SameOperators should fail if all operators in both signatures are not the same.
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// let mut sig2 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (1, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// assert!(sig1.merge(&sig2, MergeStrategy::SameOperators).is_err());
///
/// // Merging 2 signatures assuming any operators with the same name and arity are the same.
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// let mut sig2 = Signature::new(vec![
/// (2, Some("A".to_string())),
/// (1, Some("B".to_string())),
/// (0, Some("K".to_string())),
/// ]);
///
/// sig1.merge(&sig2, MergeStrategy::OperatorsByArityAndName);
///
/// let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
///
/// assert_eq!(ops, vec![".", "S", "K", "A", "B"]);
/// ```
pub fn merge(&self, other: &Signature, strategy: MergeStrategy) -> Result<SignatureChange, ()> {
self.sig
.write()
.expect("poisoned signature")
.merge(&other, strategy)
}
}
impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let sig = self.sig.read().expect("poisoned signature");
write!(f, "Signature{{{:?}}}", sig)
}
}
impl Default for Signature {
fn default() -> Signature {
Signature {
sig: Arc::new(RwLock::new(Sig::default())),
}
}
}
impl PartialEq for Signature {
fn eq(&self, other: &Signature) -> bool {
self.sig
.read()
.expect("poisoned signature")
.eq(&other.sig.read().expect("poisoned signature"))
}
}
impl Eq for Signature {}
impl Hash for Signature {
fn hash<H: Hasher>(&self, state: &mut H) {
self.sig.read().expect("poisoned signature").hash(state);
}
}
#[derive(Clone, Debug)]
pub(crate) struct Sig {
/// Stores the (arity, name) for every [`Operator`].
/// [`Operator`]: struct.Operator.html
pub(crate) operators: Vec<(u32, Option<String>)>,
/// Stores the name for every [`Variable`].
/// [`Variable`]: struct.Variable.html
pub(crate) variables: Vec<Option<String>>,
}
impl Sig {
pub fn new(operator_spec: Vec<(u32, Option<String>)>) -> Sig {
Sig {
operators: operator_spec,
variables: vec![],
}
}
pub fn operators(&self) -> Vec<usize> {
(0..self.operators.len()).collect()
}
pub fn variables(&self) -> Vec<usize> {
(0..self.variables.len()).collect()
}
pub fn new_op(&mut self, arity: u32, name: Option<String>) -> usize {
self.operators.push((arity, name));
self.operators.len() - 1
}
pub fn new_var(&mut self, name: Option<String>) -> usize {
self.variables.push(name);
self.variables.len() - 1
}
pub fn merge(
&mut self,
other: &Signature,
strategy: MergeStrategy,
) -> Result<SignatureChange, ()> {
let mut other = other.sig.write().expect("poisoned signature");
let op_map =
match strategy {
MergeStrategy::SameOperators => {
let mut temp_map = HashMap::default();
if self.operators.len() == other.operators.len()
&& self.operators.iter().zip(&other.operators).all(
|((arity1, op1), (arity2, op2))| *arity1 == *arity2 && *op1 == *op2,
)
{
for idx in 0..self.operators.len() {
temp_map.insert(idx, idx);
}
} else {
return Err(());
}
temp_map
}
MergeStrategy::OperatorsByArityAndName => {
let old_len = self.operators.len();
let mut new_idx = old_len;
let mut temp_map = HashMap::default();
for (op, idx) in other.operators.iter().zip(0..other.operators.len()) {
if self.operators.contains(&op) {
for original_idx in 0..self.operators.len() {
if self.operators[original_idx] == *op {
temp_map.insert(idx, original_idx);
break;
}
}
} else {
self.operators.push(op.clone());
temp_map.insert(idx, new_idx);
new_idx += 1;
}
}
temp_map
}
MergeStrategy::DistinctOperators => {
let mut new_idx = self.operators.len();
let mut temp_map = HashMap::default();
for idx in 0..other.operators.len() {
temp_map.insert(idx, new_idx);
new_idx += 1;
}
self.operators.append(&mut other.operators);
temp_map
}
};
let delta_var = self.variables.len();
self.variables.append(&mut other.variables);
Ok(SignatureChange { op_map, delta_var })
}
}
impl Default for Sig {
fn default() -> Sig {
Sig {
operators: Vec::new(),
variables: Vec::new(),
}
}
}
impl Hash for Sig {
fn hash<H: Hasher>(&self, state: &mut H) {
self.variables.hash(state);
self.operators.hash(state);
}
}
impl PartialEq for Sig {
fn eq(&self, other: &Sig) -> bool {
self.variables.len() == other.variables.len()
&& self.operators.len() == other.operators.len()
&& self
.operators
.iter()
.zip(&other.operators)
.all(|(&(arity1, _), &(arity2, _))| arity1 == arity2)
}
}
/// Specifies how to merge two signatures.
/// See [`Signature::merge`].
///
/// [`Signature::merge`]: struct.Signature.html#method.merge
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MergeStrategy {
/// Operators won't be added to the signature:
/// this must mean all operators were added in the same order for both
/// signatures.
SameOperators,
/// Operators are added to the signature only when there is no existing
/// operator with the same arity and name.
OperatorsByArityAndName,
/// Operators are always added distinctly:
/// no operators associated with the first signature will ever equate to
/// those associated with the second signature.
DistinctOperators,
}
/// Allows [`Term`]s/[`Rule`]s/[`TRS`]s to be reified for use with another [`Signature`].
/// See [`Signature::merge`].
///
/// [`Signature::merge`]: struct.Signature.html#method.merge
/// [`Term`]: struct.Term.html
/// [`Rule`]: struct.Rule.html
/// [`TRS`]: struct.TRS.html
/// [`Signature`]: struct.Signature.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{MergeStrategy, Signature, parse_term, parse_trs};
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
/// let mut sig2 = Signature::default();
///
/// let term = parse_term(&mut sig2, "A B").unwrap();
///
/// assert_eq!(term.pretty(), "A B");
///
/// let sigchange = sig1.merge(&sig2, MergeStrategy::OperatorsByArityAndName).unwrap();
///
/// let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
///
/// assert_eq!(ops, vec![".", "S", "K", "A", "B"]);
///
/// let term = sigchange.reify_term(&sig1, term);
///
/// assert_eq!(term.pretty(), "A B");
/// ```
pub struct SignatureChange {
op_map: HashMap<usize, usize>,
delta_var: usize,
}
impl SignatureChange {
/// Reifies [`Term`] for use with another [`Signature`].
///
/// [`Term`]: struct.Term.html
/// [`Signature`]: struct.Signature.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{MergeStrategy, Signature, parse_term, parse_trs};
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
/// let mut sig2 = Signature::default();
///
/// let term = parse_term(&mut sig2, "A B").unwrap();
///
/// let sigchange = sig1.merge(&sig2, MergeStrategy::DistinctOperators).unwrap();
///
/// let term = sigchange.reify_term(&sig1, term);
///
/// assert_eq!(term.pretty(), "A B");
/// ```
pub fn reify_term(&self, sig: &Signature, term: Term) -> Term {
match term {
Term::Variable(Variable { id, .. }) => {
let id = id + self.delta_var;
Term::Variable(Variable {
id,
sig: sig.clone(),
})
}
Term::Application {
op: Operator { id, .. },
args,
} => {
let id = self.op_map[&id];
Term::Application {
op: Operator {
id,
sig: sig.clone(),
},
args: args.into_iter().map(|t| self.reify_term(sig, t)).collect(),
}
}
}
}
/// Reifies [`Context`] for use with another [`Signature`].
///
/// [`Context`]: struct.Context.html
/// [`Signature`]: struct.Signature.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{MergeStrategy, Signature, Context, parse_context};
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
/// let mut sig2 = Signature::default();
///
/// let context = parse_context(&mut sig2, "A([!] B)").expect("parse of A([!] B)");
///
/// let sigchange = sig1.merge(&sig2, MergeStrategy::OperatorsByArityAndName).unwrap();
///
/// let context = sigchange.reify_context(&sig1, context);
///
/// assert_eq!(context.pretty(), "A([!], B)");
/// ```
pub fn reify_context(&self, sig: &Signature, context: Context) -> Context {
match context {
Context::Hole => Context::Hole,
Context::Variable(Variable { id, .. }) => {
let id = id + self.delta_var;
Context::Variable(Variable {
id,
sig: sig.clone(),
})
}
Context::Application {
op: Operator { id, .. },
args,
} => {
let id = self.op_map[&id];
Context::Application {
op: Operator {
id,
sig: sig.clone(),
},
args: args
.into_iter()
.map(|t| self.reify_context(sig, t))
.collect(),
}
}
}
}
/// Reifies [`Rule`] for use with another [`Signature`].
///
/// [`Rule`]: struct.Rule.html
/// [`Signature`]: struct.Signature.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{MergeStrategy, Signature, parse_rule, parse_trs};
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
/// let mut sig2 = Signature::default();
///
/// let rule = parse_rule(&mut sig2, "A = B | C").unwrap();
///
/// let sigchange = sig1.merge(&sig2, MergeStrategy::OperatorsByArityAndName).unwrap();
///
/// let rule = sigchange.reify_rule(&sig1, rule);
///
/// assert_eq!(rule.pretty(), "A = B | C");
/// ```
pub fn reify_rule(&self, sig: &Signature, rule: Rule) -> Rule {
let Rule { lhs, rhs } = rule;
let lhs = self.reify_term(sig, lhs);
let rhs = rhs.into_iter().map(|t| self.reify_term(sig, t)).collect();
Rule { lhs, rhs }
}
/// Reifies [`TRS`] for use with another [`Signature`].
///
/// [`TRS`]: struct.TRS.html
/// [`Signature`]: struct.Signature.html
///
/// # Examples
///
/// ```
/// # use term_rewriting::{MergeStrategy, Signature, parse_trs};
/// let mut sig1 = Signature::new(vec![
/// (2, Some(".".to_string())),
/// (0, Some("S".to_string())),
/// (0, Some("K".to_string())),
/// ]);
/// let mut sig2 = Signature::default();
///
/// let trs = parse_trs(&mut sig2,
/// "A = B;
/// C = B;").unwrap();
///
/// let sigchange = sig1.merge(&sig2, MergeStrategy::OperatorsByArityAndName).unwrap();
///
/// let trs = sigchange.reify_trs(&sig1, trs);
///
/// assert_eq!(trs.pretty(),
/// "A = B;
/// C = B;");
/// ```
pub fn reify_trs(&self, sig: &Signature, trs: TRS) -> TRS {
let rules = trs
.rules
.into_iter()
.map(|r| self.reify_rule(sig, r))
.collect();
TRS { rules, ..trs }
}
}
#[cfg(test)]
mod tests {
use super::super::super::parser::*;
use super::super::Signature;
use super::*;
#[test]
fn new_test() {
let mut sig = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let mut ops = sig.operators();
let mut op_names: Vec<String> = ops.iter().map(|op| op.display()).collect();
assert_eq!(op_names, vec![".", "S", "K"]);
let mut sig2 = Signature::default();
sig2.new_op(2, Some(".".to_string()));
sig2.new_op(0, Some("S".to_string()));
sig2.new_op(0, Some("K".to_string()));
ops = sig2.operators();
op_names = ops.iter().map(|op| op.display()).collect();
assert_eq!(op_names, vec![".", "S", "K"]);
assert_eq!(sig, sig2);
sig = Signature::new(vec![]);
sig2 = Signature::default();
assert_eq!(sig, sig2);
}
#[test]
#[ignore]
fn operators_test() {
let sig = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let ops: Vec<String> = sig.operators().iter().map(|op| op.display()).collect();;
assert_eq!(ops, vec![".", "S", "K"]);
}
#[test]
#[ignore]
fn variables_test() {
let mut sig = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
parse_term(&mut sig, "A(x_ y_)").expect("parse of A(x_ y_)");
let vars: Vec<String> = sig.variables().iter().map(|v| v.display()).collect();
assert_eq!(vars, vec!["x_", "y_"]);
}
#[test]
fn atoms_test() {
let mut sig = Signature::default();
parse_term(&mut sig, "A(x_ B(y_))").expect("parse of A(x_ B(y_))");
let atoms: Vec<String> = sig.atoms().iter().map(|a| a.display()).collect();
assert_eq!(atoms, vec!["x_", "y_", "B", "A"]);
}
#[test]
#[ignore]
fn new_op_test() {
let mut sig = Signature::default();
let a = sig.new_op(1, Some(".".to_string()));
let s = sig.new_op(2, Some("S".to_string()));
let s2 = sig.new_op(2, Some("S".to_string()));
assert_ne!(a, s);
assert_ne!(a, s2);
assert_ne!(s, s2);
}
#[test]
#[ignore]
fn new_var() {
let mut sig = Signature::default();
let z = sig.new_var(Some("z".to_string()));
let z2 = sig.new_var(Some("z".to_string()));
assert_ne!(z, z2);
}
#[test]
fn signature_merge_test() {
// Merging 2 signatures by assuming all operators in the second are distinct from the first.
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let sig2 = Signature::new(vec![
(2, Some("A".to_string())),
(1, Some("B".to_string())),
(0, Some("C".to_string())),
]);
sig1.merge(&sig2, MergeStrategy::DistinctOperators)
.expect("merge of distinct operators");
let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
assert_eq!(ops, vec![".", "S", "K", "A", "B", "C"]);
// Merging 2 signatures by assuming all operators in the second are the same from the first.
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let sig2 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
sig1.merge(&sig2, MergeStrategy::SameOperators)
.expect("merge of same operators");
let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
assert_eq!(ops, vec![".", "S", "K"]);
// Merging 2 signatures by SameOperators should fail if all operators in both signatures are not the same.
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let sig2 = Signature::new(vec![
(2, Some(".".to_string())),
(1, Some("S".to_string())),
(0, Some("K".to_string())),
]);
assert!(sig1.merge(&sig2, MergeStrategy::SameOperators).is_err());
// Merging 2 signatures assuming any operators with the same name and arity are the same.
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let sig2 = Signature::new(vec![
(2, Some("A".to_string())),
(1, Some("B".to_string())),
(0, Some("K".to_string())),
]);
sig1.merge(&sig2, MergeStrategy::OperatorsByArityAndName)
.expect("merge of same arity and name");
let ops: Vec<String> = sig1.operators().iter().map(|op| op.display()).collect();
assert_eq!(ops, vec![".", "S", "K", "A", "B"]);
}
#[test]
fn sig_merge_test() {}
#[test]
fn reify_term_test() {
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let mut sig2 = Signature::default();
let term = parse_term(&mut sig2, "A B").unwrap();
let sigchange = sig1.merge(&sig2, MergeStrategy::DistinctOperators).unwrap();
let term = sigchange.reify_term(&sig1, term);
assert_eq!(term.pretty(), "A B");
}
#[test]
fn reify_context_test() {
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let mut sig2 = Signature::default();
let context = parse_context(&mut sig2, "A([!] B)").expect("parse of A([!] B)");
let sigchange = sig1
.merge(&sig2, MergeStrategy::OperatorsByArityAndName) | .unwrap();
let context = sigchange.reify_context(&sig1, context);
assert_eq!(context.pretty(), "A([!], B)");
}
#[test]
fn reify_rule_test() {
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let mut sig2 = Signature::default();
let rule = parse_rule(&mut sig2, "A = B | C").unwrap();
let sigchange = sig1
.merge(&sig2, MergeStrategy::OperatorsByArityAndName)
.unwrap();
let rule = sigchange.reify_rule(&sig1, rule);
assert_eq!(rule.pretty(), "A = B | C");
}
#[test]
fn reify_trs_test() {
let sig1 = Signature::new(vec![
(2, Some(".".to_string())),
(0, Some("S".to_string())),
(0, Some("K".to_string())),
]);
let mut sig2 = Signature::default();
let trs = parse_trs(&mut sig2, "A = B;\nC = B;").unwrap();
let sigchange = sig1
.merge(&sig2, MergeStrategy::OperatorsByArityAndName)
.unwrap();
let trs = sigchange.reify_trs(&sig1, trs);
assert_eq!(trs.pretty(), "A = B;\nC = B;");
}
} | |
lib.rs | pub trait Summary {
fn summarize(&self) -> String {
String::from("default summary implementation")
}
}
pub struct NewsArticle {
pub headline: String,
pub location: String,
pub author: String,
pub content: String,
}
// impl Summary for NewsArticle {} // get the default implementation
impl Summary for NewsArticle {
fn summarize(&self) -> String {
format!(
"{}, by {} ({}) len={}",
self.headline,
self.author,
self.location,
self.content.len()
)
}
}
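// Sketch: a type with an empty impl falls back to the default method body,
// e.g. (hypothetical type):
// struct Stub;
// impl Summary for Stub {}
// assert_eq!(Stub.summarize(), "default summary implementation");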
pub struct Tweet {
pub username: String,
pub content: String,
pub reply: bool,
pub retweet: bool,
} | format!(
"{}: {} retweet={}, reply={}",
self.username, self.content, self.retweet, self.reply
)
}
} |
impl Summary for Tweet {
fn summarize(&self) -> String { |
crd.go | package crd
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
aadpodid "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity"
aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1"
"github.com/Azure/aad-pod-identity/pkg/metrics"
"github.com/Azure/aad-pod-identity/pkg/stats"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers/internalinterfaces"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
)
const (
finalizerName = "azureassignedidentity.finalizers.aadpodidentity.k8s.io"
)
// Client represents all the watchers
type Client struct {
rest *rest.RESTClient
BindingInformer cache.SharedInformer
IDInformer cache.SharedInformer
AssignedIDInformer cache.SharedInformer
PodIdentityExceptionInformer cache.SharedInformer
reporter *metrics.Reporter
}
// ClientInt is an abstraction used to interact with CRDs.
type ClientInt interface {
Start(exit <-chan struct{})
SyncCache(exit <-chan struct{}, initial bool, cacheSyncs ...cache.InformerSynced)
SyncCacheAll(exit <-chan struct{}, initial bool)
RemoveAssignedIdentity(assignedIdentity *aadpodid.AzureAssignedIdentity) error
CreateAssignedIdentity(assignedIdentity *aadpodid.AzureAssignedIdentity) error
UpdateAssignedIdentity(assignedIdentity *aadpodid.AzureAssignedIdentity) error
UpdateAzureAssignedIdentityStatus(assignedIdentity *aadpodid.AzureAssignedIdentity, status string) error
UpgradeAll() error
ListBindings() (res *[]aadpodid.AzureIdentityBinding, err error)
ListAssignedIDs() (res *[]aadpodid.AzureAssignedIdentity, err error)
ListAssignedIDsInMap() (res map[string]aadpodid.AzureAssignedIdentity, err error)
ListIds() (res *[]aadpodid.AzureIdentity, err error)
ListPodIds(podns, podname string) (map[string][]aadpodid.AzureIdentity, error)
ListPodIdentityExceptions(ns string) (res *[]aadpodid.AzurePodIdentityException, err error)
}
// NewCRDClientLite returns a new CRD lite client and error if any.
func NewCRDClientLite(config *rest.Config, nodeName string, scale, isStandardMode bool) (*Client, error) {
restClient, err := newRestClient(config)
if err != nil {
return nil, err
}
var assignedIDListInformer, bindingListInformer, idListInformer cache.SharedInformer
// assigned identity informer is required only for standard mode
if isStandardMode {
var assignedIDListWatch *cache.ListWatch
if scale {
assignedIDListWatch = newAssignedIDNodeListWatch(restClient, nodeName)
} else {
assignedIDListWatch = newAssignedIDListWatch(restClient)
}
assignedIDListInformer, err = newAssignedIDInformer(assignedIDListWatch)
if err != nil {
return nil, err
}
} else {
// creating binding and identity list informers for non standard mode
if bindingListInformer, err = newBindingInformerLite(newBindingListWatch(restClient)); err != nil {
return nil, err
}
if idListInformer, err = newIDInformerLite(newIDListWatch(restClient)); err != nil {
return nil, err
}
}
podIdentityExceptionListWatch := newPodIdentityExceptionListWatch(restClient)
podIdentityExceptionInformer, err := newPodIdentityExceptionInformer(podIdentityExceptionListWatch)
if err != nil {
return nil, err
}
reporter, err := metrics.NewReporter()
if err != nil {
return nil, fmt.Errorf("failed to create reporter for metrics, error: %+v", err)
}
return &Client{
AssignedIDInformer: assignedIDListInformer,
PodIdentityExceptionInformer: podIdentityExceptionInformer,
BindingInformer: bindingListInformer,
IDInformer: idListInformer,
rest: restClient,
reporter: reporter,
}, nil
}
// NewCRDClient returns a new CRD client and error if any.
func NewCRDClient(config *rest.Config, eventCh chan aadpodid.EventType) (*Client, error) {
restClient, err := newRestClient(config)
if err != nil {
return nil, err
}
bindingListWatch := newBindingListWatch(restClient)
bindingInformer, err := newBindingInformer(restClient, eventCh, bindingListWatch)
if err != nil {
return nil, err
}
idListWatch := newIDListWatch(restClient)
idInformer, err := newIDInformer(restClient, eventCh, idListWatch)
if err != nil {
return nil, err
}
assignedIDListWatch := newAssignedIDListWatch(restClient)
assignedIDListInformer, err := newAssignedIDInformer(assignedIDListWatch)
if err != nil {
return nil, err
}
reporter, err := metrics.NewReporter()
if err != nil {
return nil, fmt.Errorf("failed to create reporter for metrics, error: %+v", err)
}
return &Client{
rest: restClient,
BindingInformer: bindingInformer,
IDInformer: idInformer,
AssignedIDInformer: assignedIDListInformer,
reporter: reporter,
}, nil
}
func newRestClient(config *rest.Config) (*rest.RESTClient, error) {
crdconfig := *config
crdconfig.GroupVersion = &aadpodv1.SchemeGroupVersion
crdconfig.APIPath = "/apis"
crdconfig.ContentType = runtime.ContentTypeJSON
scheme := runtime.NewScheme()
if err := aadpodv1.AddToScheme(scheme); err != nil {
return nil, err
}
if err := clientgoscheme.AddToScheme(scheme); err != nil {
return nil, err
}
crdconfig.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: serializer.NewCodecFactory(scheme)}
// Client interacting with our CRDs
restClient, err := rest.RESTClientFor(&crdconfig)
if err != nil {
return nil, err
}
return restClient, nil
}
func newBindingListWatch(r *rest.RESTClient) *cache.ListWatch {
return cache.NewListWatchFromClient(r, aadpodv1.AzureIDBindingResource, v1.NamespaceAll, fields.Everything())
}
func newBindingInformer(r *rest.RESTClient, eventCh chan aadpodid.EventType, lw *cache.ListWatch) (cache.SharedInformer, error) {
azBindingInformer := cache.NewSharedInformer(
lw,
&aadpodv1.AzureIdentityBinding{},
time.Minute*10)
if azBindingInformer == nil {
return nil, fmt.Errorf("failed to create watcher for %s", aadpodv1.AzureIDBindingResource)
}
azBindingInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
klog.V(6).Infof("binding created")
eventCh <- aadpodid.BindingCreated
},
DeleteFunc: func(obj interface{}) {
klog.V(6).Infof("binding deleted")
eventCh <- aadpodid.BindingDeleted
},
UpdateFunc: func(oldObj, newObj interface{}) {
klog.V(6).Infof("binding updated")
eventCh <- aadpodid.BindingUpdated
},
},
)
return azBindingInformer, nil
}
func newIDListWatch(r *rest.RESTClient) *cache.ListWatch {
return cache.NewListWatchFromClient(r, aadpodv1.AzureIDResource, v1.NamespaceAll, fields.Everything())
}
func newIDInformer(r *rest.RESTClient, eventCh chan aadpodid.EventType, lw *cache.ListWatch) (cache.SharedInformer, error) {
azIDInformer := cache.NewSharedInformer(
lw,
&aadpodv1.AzureIdentity{},
time.Minute*10)
if azIDInformer == nil {
return nil, fmt.Errorf("failed to create watcher for %s", aadpodv1.AzureIDResource)
}
azIDInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
klog.V(6).Infof("identity created")
eventCh <- aadpodid.IdentityCreated
},
DeleteFunc: func(obj interface{}) {
klog.V(6).Infof("identity deleted")
eventCh <- aadpodid.IdentityDeleted
},
UpdateFunc: func(oldObj, newObj interface{}) {
klog.V(6).Infof("identity updated")
eventCh <- aadpodid.IdentityUpdated
},
},
)
return azIDInformer, nil
}
// NodeNameFilter - CRDs do not yet support field selectors. Instead, we
// label each assigned identity with its node name and later use
// NodeNameFilter to tweak the list options so results are filtered on the
// nodename label.
func NodeNameFilter(nodeName string) internalinterfaces.TweakListOptionsFunc {
return func(l *v1.ListOptions) {
if l == nil {
l = &v1.ListOptions{}
}
l.LabelSelector = l.LabelSelector + "nodename=" + nodeName
}
}
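// Sketch (hypothetical node name): combining this filter with the node-scoped
// list watch used in scale mode.
// lw := newAssignedIDNodeListWatch(restClient, "aks-nodepool1-00000000-0")
// informer, err := newAssignedIDInformer(lw)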
func newAssignedIDNodeListWatch(r *rest.RESTClient, nodeName string) *cache.ListWatch {
return cache.NewFilteredListWatchFromClient(r, aadpodv1.AzureAssignedIDResource, v1.NamespaceAll, NodeNameFilter(nodeName))
}
func newAssignedIDListWatch(r *rest.RESTClient) *cache.ListWatch {
return cache.NewListWatchFromClient(r, aadpodv1.AzureAssignedIDResource, v1.NamespaceAll, fields.Everything())
}
func newAssignedIDInformer(lw *cache.ListWatch) (cache.SharedInformer, error) {
azAssignedIDInformer := cache.NewSharedInformer(lw, &aadpodv1.AzureAssignedIdentity{}, time.Minute*10)
if azAssignedIDInformer == nil {
return nil, fmt.Errorf("failed to create %s informer", aadpodv1.AzureAssignedIDResource)
}
return azAssignedIDInformer, nil
}
func newBindingInformerLite(lw *cache.ListWatch) (cache.SharedInformer, error) {
azBindingInformer := cache.NewSharedInformer(lw, &aadpodv1.AzureIdentityBinding{}, time.Minute*10)
if azBindingInformer == nil {
return nil, fmt.Errorf("failed to create %s informer", aadpodv1.AzureIDBindingResource)
}
return azBindingInformer, nil
}
func newIDInformerLite(lw *cache.ListWatch) (cache.SharedInformer, error) {
azIDInformer := cache.NewSharedInformer(lw, &aadpodv1.AzureIdentity{}, time.Minute*10)
if azIDInformer == nil {
return nil, fmt.Errorf("failed to create %s informer", aadpodv1.AzureIDResource)
}
return azIDInformer, nil
}
func newPodIdentityExceptionListWatch(r *rest.RESTClient) *cache.ListWatch {
optionsModifier := func(options *v1.ListOptions) {}
return cache.NewFilteredListWatchFromClient(
r,
aadpodv1.AzurePodIdentityExceptionResource,
v1.NamespaceAll,
optionsModifier,
)
}
func newPodIdentityExceptionInformer(lw *cache.ListWatch) (cache.SharedInformer, error) {
azPodIDExceptionInformer := cache.NewSharedInformer(lw, &aadpodv1.AzurePodIdentityException{}, time.Minute*10)
if azPodIDExceptionInformer == nil {
return nil, fmt.Errorf("failed to create %s informer", aadpodv1.AzurePodIdentityExceptionResource)
}
return azPodIDExceptionInformer, nil
}
func (c *Client) getObjectList(resource string, i runtime.Object) (runtime.Object, error) {
options := v1.ListOptions{}
do := c.rest.Get().Namespace(v1.NamespaceAll).Resource(resource).VersionedParams(&options, v1.ParameterCodec).Do(context.TODO())
body, err := do.Raw()
if err != nil {
return nil, fmt.Errorf("failed to get %s, error: %+v", resource, err)
}
err = json.Unmarshal(body, &i)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal to object %T, error: %+v", i, err)
}
return i, err
}
func (c *Client) setObject(resource, ns, name string, i interface{}, obj runtime.Object) error {
err := c.rest.Put().Namespace(ns).Resource(resource).Name(name).Body(i).Do(context.TODO()).Into(obj)
if err != nil {
return fmt.Errorf("failed to set object for resource %s, error: %+v", resource, err)
}
return nil
}
// Upgrade performs type upgrade to a specific aad-pod-identity CRD.
func (c *Client) Upgrade(resource string, i runtime.Object) (map[string]runtime.Object, error) {
m := make(map[string]runtime.Object)
i, err := c.getObjectList(resource, i)
if err != nil {
return m, err
}
list, err := meta.ExtractList(i)
if err != nil {
return m, fmt.Errorf("failed to extract list for resource %s, error: %+v", resource, err)
}
for _, item := range list {
o, err := meta.Accessor(item)
if err != nil {
return m, fmt.Errorf("failed to get object for resource %s, error: %+v", resource, err)
}
switch resource {
case aadpodv1.AzureIDResource:
var obj aadpodv1.AzureIdentity
err = c.setObject(resource, o.GetNamespace(), o.GetName(), o, &obj)
if err != nil {
return m, err
}
obj.TypeMeta = metav1.TypeMeta{
APIVersion: aadpodv1.SchemeGroupVersion.String(),
Kind: reflect.ValueOf(i).Elem().Type().Name(),
}
m[getMapKey(o.GetNamespace(), o.GetName())] = &obj
case aadpodv1.AzureIDBindingResource:
var obj aadpodv1.AzureIdentityBinding
err = c.setObject(resource, o.GetNamespace(), o.GetName(), o, &obj)
if err != nil {
return m, err
}
obj.TypeMeta = metav1.TypeMeta{
APIVersion: aadpodv1.SchemeGroupVersion.String(),
Kind: reflect.ValueOf(i).Elem().Type().Name(),
}
m[getMapKey(o.GetNamespace(), o.GetName())] = &obj
default:
err = c.setObject(resource, o.GetNamespace(), o.GetName(), o, nil)
if err != nil {
return m, err
}
}
}
return m, nil
}
// UpgradeAll performs type upgrade to for all aad-pod-identity CRDs.
func (c *Client) UpgradeAll() error {
updatedAzureIdentities, err := c.Upgrade(aadpodv1.AzureIDResource, &aadpodv1.AzureIdentityList{})
if err != nil {
return err
}
updatedAzureIdentityBindings, err := c.Upgrade(aadpodv1.AzureIDBindingResource, &aadpodv1.AzureIdentityBindingList{})
if err != nil {
return err
}
_, err = c.Upgrade(aadpodv1.AzurePodIdentityExceptionResource, &aadpodv1.AzurePodIdentityExceptionList{})
if err != nil {
return err
}
// Update azure assigned identities separately, as we need to use the latest
// updated azure identity and binding as refs. Doing this ensures the upgrade
// does not trigger any sync cycles.
i, err := c.getObjectList(aadpodv1.AzureAssignedIDResource, &aadpodv1.AzureAssignedIdentityList{})
if err != nil {
return err
}
list, err := meta.ExtractList(i)
if err != nil {
return fmt.Errorf("failed to extract list for resource: %s, error: %+v", aadpodv1.AzureAssignedIDResource, err)
}
for _, item := range list {
o, err := meta.Accessor(item)
if err != nil {
return fmt.Errorf("failed to get object for resource: %s, error: %+v", aadpodv1.AzureAssignedIDResource, err)
}
obj := o.(*aadpodv1.AzureAssignedIdentity)
idName := obj.Spec.AzureIdentityRef.Name
idNamespace := obj.Spec.AzureIdentityRef.Namespace
bindingName := obj.Spec.AzureBindingRef.Name
bindingNamespace := obj.Spec.AzureBindingRef.Namespace
if v, exists := updatedAzureIdentities[getMapKey(idNamespace, idName)]; exists && v != nil {
obj.Spec.AzureIdentityRef = v.(*aadpodv1.AzureIdentity)
}
if v, exists := updatedAzureIdentityBindings[getMapKey(bindingNamespace, bindingName)]; exists && v != nil {
obj.Spec.AzureBindingRef = v.(*aadpodv1.AzureIdentityBinding)
}
err = c.setObject(aadpodv1.AzureAssignedIDResource, o.GetNamespace(), o.GetName(), obj, nil)
if err != nil {
return err
}
}
return nil
}
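// Sketch (hypothetical wiring): a one-shot type upgrade run at startup.
// c, err := NewCRDClientLite(config, nodeName, false, false)
// if err == nil {
//     err = c.UpgradeAll()
// }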
// StartLite is to be used only in the case of the lite client.
func (c *Client) StartLite(exit <-chan struct{}) {
var cacheHasSynced []cache.InformerSynced
if c.AssignedIDInformer != nil {
go c.AssignedIDInformer.Run(exit)
cacheHasSynced = append(cacheHasSynced, c.AssignedIDInformer.HasSynced)
}
if c.BindingInformer != nil {
go c.BindingInformer.Run(exit)
cacheHasSynced = append(cacheHasSynced, c.BindingInformer.HasSynced)
}
if c.IDInformer != nil {
go c.IDInformer.Run(exit)
cacheHasSynced = append(cacheHasSynced, c.IDInformer.HasSynced)
}
if c.PodIdentityExceptionInformer != nil {
go c.PodIdentityExceptionInformer.Run(exit)
cacheHasSynced = append(cacheHasSynced, c.PodIdentityExceptionInformer.HasSynced)
}
c.SyncCache(exit, true, cacheHasSynced...)
klog.Info("CRD lite informers started ")
}
// Start starts all informer routines to watch for CRD-related changes.
func (c *Client) Start(exit <-chan struct{}) {
go c.BindingInformer.Run(exit)
go c.IDInformer.Run(exit)
go c.AssignedIDInformer.Run(exit)
c.SyncCache(exit, true, c.BindingInformer.HasSynced, c.IDInformer.HasSynced, c.AssignedIDInformer.HasSynced)
klog.Info("CRD informers started")
}
// SyncCache synchronizes cache
func (c *Client) SyncCache(exit <-chan struct{}, initial bool, cacheSyncs ...cache.InformerSynced) {
if !cache.WaitForCacheSync(exit, cacheSyncs...) {
if !initial {
klog.Errorf("cache failed to be synchronized")
return
}
panic("Cache failed to be synchronized")
}
}
// SyncCacheAll - sync all caches related to the client.
func (c *Client) SyncCacheAll(exit <-chan struct{}, initial bool) {
c.SyncCache(exit, initial, c.BindingInformer.HasSynced, c.IDInformer.HasSynced, c.AssignedIDInformer.HasSynced)
}
// RemoveAssignedIdentity removes the assigned identity
func (c *Client) RemoveAssignedIdentity(assignedIdentity *aadpodid.AzureAssignedIdentity) (err error) {
klog.V(6).Infof("deleting assigned id %s/%s", assignedIdentity.Namespace, assignedIdentity.Name)
begin := time.Now()
defer func() {
if err != nil {
merr := c.reporter.ReportKubernetesAPIOperationError(metrics.AssignedIdentityDeletionOperationName)
if merr != nil {
klog.Warningf("failed to report metrics, error: %+v", merr)
}
return
}
c.reporter.Report(
metrics.AssignedIdentityDeletionCountM.M(1), | err = c.rest.Delete().Namespace(assignedIdentity.Namespace).Resource(aadpodid.AzureAssignedIDResource).Name(assignedIdentity.Name).Do(context.TODO()).Into(&res)
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
if hasFinalizer(&res) {
removeFinalizer(&res)
		// update the assigned identity without the finalizer so the resource will be garbage collected
err = c.rest.Put().Namespace(assignedIdentity.Namespace).Resource(aadpodid.AzureAssignedIDResource).Name(assignedIdentity.Name).Body(&res).Do(context.TODO()).Error()
}
klog.V(5).Infof("deleting %s took: %v", assignedIdentity.Name, time.Since(begin))
stats.AggregateConcurrent(stats.DeleteAzureAssignedIdentity, begin, time.Now())
return err
}
// CreateAssignedIdentity creates a new assigned identity
func (c *Client) CreateAssignedIdentity(assignedIdentity *aadpodid.AzureAssignedIdentity) (err error) {
klog.Infof("creating assigned id %s/%s", assignedIdentity.Namespace, assignedIdentity.Name)
begin := time.Now()
defer func() {
if err != nil {
merr := c.reporter.ReportKubernetesAPIOperationError(metrics.AssignedIdentityAdditionOperationName)
if merr != nil {
klog.Warningf("failed to report metrics, error: %+v", merr)
}
return
}
c.reporter.Report(
metrics.AssignedIdentityAdditionCountM.M(1),
metrics.AssignedIdentityAdditionDurationM.M(metrics.SinceInSeconds(begin)))
}()
var res aadpodv1.AzureAssignedIdentity
v1AssignedID := aadpodv1.ConvertInternalAssignedIdentityToV1AssignedIdentity(*assignedIdentity)
if !hasFinalizer(&v1AssignedID) {
v1AssignedID.SetFinalizers(append(v1AssignedID.GetFinalizers(), finalizerName))
}
err = c.rest.Post().Namespace(assignedIdentity.Namespace).Resource(aadpodid.AzureAssignedIDResource).Body(&v1AssignedID).Do(context.TODO()).Into(&res)
if err != nil {
return err
}
klog.V(5).Infof("time taken to create %s/%s: %v", assignedIdentity.Namespace, assignedIdentity.Name, time.Since(begin))
stats.AggregateConcurrent(stats.CreateAzureAssignedIdentity, begin, time.Now())
return nil
}
// UpdateAssignedIdentity updates an existing assigned identity
func (c *Client) UpdateAssignedIdentity(assignedIdentity *aadpodid.AzureAssignedIdentity) (err error) {
klog.Infof("updating assigned id %s/%s", assignedIdentity.Namespace, assignedIdentity.Name)
begin := time.Now()
defer func() {
if err != nil {
			merr := c.reporter.ReportKubernetesAPIOperationError(metrics.AssignedIdentityUpdateOperationName)
			if merr != nil {
				klog.Warningf("failed to report metrics, error: %+v", merr)
			}
return
}
c.reporter.Report(
metrics.AssignedIdentityUpdateCountM.M(1),
metrics.AssignedIdentityUpdateDurationM.M(metrics.SinceInSeconds(begin)))
}()
v1AssignedID := aadpodv1.ConvertInternalAssignedIdentityToV1AssignedIdentity(*assignedIdentity)
err = c.rest.Put().Namespace(assignedIdentity.Namespace).Resource(aadpodid.AzureAssignedIDResource).Name(assignedIdentity.Name).Body(&v1AssignedID).Do(context.TODO()).Error()
if err != nil {
return fmt.Errorf("failed to update AzureAssignedIdentity, error: %+v", err)
}
klog.V(5).Infof("time taken to update %s/%s: %v", assignedIdentity.Namespace, assignedIdentity.Name, time.Since(begin))
stats.AggregateConcurrent(stats.UpdateAzureAssignedIdentity, begin, time.Now())
return nil
}
// ListBindings returns a list of azureidentitybindings
func (c *Client) ListBindings() (*[]aadpodid.AzureIdentityBinding, error) {
begin := time.Now()
var resList []aadpodid.AzureIdentityBinding
list := c.BindingInformer.GetStore().List()
for _, binding := range list {
o, ok := binding.(*aadpodv1.AzureIdentityBinding)
if !ok {
return nil, fmt.Errorf("failed to cast %T to %s", binding, aadpodv1.AzureIDBindingResource)
}
		// Note: List items returned from cache have an empty Kind and API version.
// Work around this issue since we need that for event recording to work.
o.SetGroupVersionKind(schema.GroupVersionKind{
Group: aadpodv1.SchemeGroupVersion.Group,
Version: aadpodv1.SchemeGroupVersion.Version,
Kind: reflect.TypeOf(*o).String()})
internalBinding := aadpodv1.ConvertV1BindingToInternalBinding(*o)
resList = append(resList, internalBinding)
klog.V(6).Infof("appending binding: %s/%s to list.", o.Namespace, o.Name)
}
stats.Aggregate(stats.AzureIdentityBindingList, time.Since(begin))
return &resList, nil
}
// ListAssignedIDs returns a list of azureassignedidentities
func (c *Client) ListAssignedIDs() (*[]aadpodid.AzureAssignedIdentity, error) {
begin := time.Now()
var resList []aadpodid.AzureAssignedIdentity
list := c.AssignedIDInformer.GetStore().List()
for _, assignedID := range list {
o, ok := assignedID.(*aadpodv1.AzureAssignedIdentity)
if !ok {
return nil, fmt.Errorf("failed to cast %T to %s", assignedID, aadpodv1.AzureAssignedIDResource)
}
		// Note: List items returned from cache have an empty Kind and API version.
// Work around this issue since we need that for event recording to work.
o.SetGroupVersionKind(schema.GroupVersionKind{
Group: aadpodv1.SchemeGroupVersion.Group,
Version: aadpodv1.SchemeGroupVersion.Version,
Kind: reflect.TypeOf(*o).String()})
out := aadpodv1.ConvertV1AssignedIdentityToInternalAssignedIdentity(*o)
resList = append(resList, out)
klog.V(6).Infof("appending AzureAssignedIdentity: %s/%s to list.", o.Namespace, o.Name)
}
stats.Aggregate(stats.AzureAssignedIdentityList, time.Since(begin))
return &resList, nil
}
// ListAssignedIDsInMap gets the list of current assigned ids, adds it to a map
// with assigned identity name as key and assigned identity as value.
func (c *Client) ListAssignedIDsInMap() (map[string]aadpodid.AzureAssignedIdentity, error) {
begin := time.Now()
result := make(map[string]aadpodid.AzureAssignedIdentity)
list := c.AssignedIDInformer.GetStore().List()
for _, assignedID := range list {
o, ok := assignedID.(*aadpodv1.AzureAssignedIdentity)
if !ok {
return nil, fmt.Errorf("failed to cast %T to %s", assignedID, aadpodv1.AzureAssignedIDResource)
}
		// Note: List items returned from cache have an empty Kind and API version.
// Work around this issue since we need that for event recording to work.
o.SetGroupVersionKind(schema.GroupVersionKind{
Group: aadpodv1.SchemeGroupVersion.Group,
Version: aadpodv1.SchemeGroupVersion.Version,
Kind: reflect.TypeOf(*o).String()})
out := aadpodv1.ConvertV1AssignedIdentityToInternalAssignedIdentity(*o)
		// assigned identity names are unique across namespaces as we use <pod name>-<id ns>-<id name>
result[o.Name] = out
klog.V(6).Infof("added to map with key: %s", o.Name)
}
stats.Aggregate(stats.AzureAssignedIdentityList, time.Since(begin))
return result, nil
}
// ListIds returns a list of azureidentities
func (c *Client) ListIds() (*[]aadpodid.AzureIdentity, error) {
begin := time.Now()
var resList []aadpodid.AzureIdentity
list := c.IDInformer.GetStore().List()
for _, id := range list {
o, ok := id.(*aadpodv1.AzureIdentity)
if !ok {
return nil, fmt.Errorf("failed to cast %T to %s", id, aadpodv1.AzureIDResource)
}
		// Note: List items returned from cache have an empty Kind and API version.
// Work around this issue since we need that for event recording to work.
o.SetGroupVersionKind(schema.GroupVersionKind{
Group: aadpodv1.SchemeGroupVersion.Group,
Version: aadpodv1.SchemeGroupVersion.Version,
Kind: reflect.TypeOf(*o).String()})
out := aadpodv1.ConvertV1IdentityToInternalIdentity(*o)
resList = append(resList, out)
klog.V(6).Infof("appending AzureIdentity %s/%s to list.", o.Namespace, o.Name)
}
stats.Aggregate(stats.AzureIdentityList, time.Since(begin))
return &resList, nil
}
// ListPodIdentityExceptions returns list of azurepodidentityexceptions
func (c *Client) ListPodIdentityExceptions(ns string) (*[]aadpodid.AzurePodIdentityException, error) {
begin := time.Now()
var resList []aadpodid.AzurePodIdentityException
list := c.PodIdentityExceptionInformer.GetStore().List()
for _, binding := range list {
o, ok := binding.(*aadpodv1.AzurePodIdentityException)
if !ok {
return nil, fmt.Errorf("failed to cast %T to %s", binding, aadpodid.AzurePodIdentityExceptionResource)
}
if o.Namespace == ns {
			// Note: List items returned from cache have an empty Kind and API version.
// Work around this issue since we need that for event recording to work.
o.SetGroupVersionKind(schema.GroupVersionKind{
Group: aadpodv1.SchemeGroupVersion.Group,
Version: aadpodv1.SchemeGroupVersion.Version,
Kind: reflect.TypeOf(*o).String()})
out := aadpodv1.ConvertV1PodIdentityExceptionToInternalPodIdentityException(*o)
resList = append(resList, out)
klog.V(6).Infof("appending exception: %s/%s to list.", o.Namespace, o.Name)
}
}
stats.Aggregate(stats.AzurePodIdentityExceptionList, time.Since(begin))
return &resList, nil
}
// ListPodIds - given a pod namespace and name, returns a map with the list of
// azure identities in each state
func (c *Client) ListPodIds(podns, podname string) (map[string][]aadpodid.AzureIdentity, error) {
list, err := c.ListAssignedIDs()
if err != nil {
return nil, err
}
idStateMap := make(map[string][]aadpodid.AzureIdentity)
for _, v := range *list {
if v.Spec.Pod == podname && v.Spec.PodNamespace == podns {
idStateMap[v.Status.Status] = append(idStateMap[v.Status.Status], *v.Spec.AzureIdentityRef)
}
}
return idStateMap, nil
}
// GetPodIDsWithBinding returns list of azure identity based on bindings
// that match pod label.
func (c *Client) GetPodIDsWithBinding(namespace string, labels map[string]string) ([]aadpodid.AzureIdentity, error) {
// get all bindings
bindings, err := c.ListBindings()
if err != nil {
return nil, err
}
if bindings == nil {
return nil, fmt.Errorf("binding list is nil from cache")
}
matchingIds := make(map[string]bool)
podLabel := labels[aadpodid.CRDLabelKey]
for _, binding := range *bindings {
// check if binding selector in pod labels
if podLabel == binding.Spec.Selector && binding.Namespace == namespace {
matchingIds[binding.Spec.AzureIdentity] = true
}
}
// get the azure identity objects based on the list generated
azIdentities, err := c.ListIds()
if err != nil {
return nil, err
}
if azIdentities == nil {
return nil, fmt.Errorf("azure identities list is nil from cache")
}
var azIds []aadpodid.AzureIdentity
for _, azIdentity := range *azIdentities {
if _, exists := matchingIds[azIdentity.Name]; exists && azIdentity.Namespace == namespace {
azIds = append(azIds, azIdentity)
}
}
return azIds, nil
}
type patchStatusOps struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value"`
}
// UpdateAzureAssignedIdentityStatus updates the status field in AzureAssignedIdentity to indicate current status
func (c *Client) UpdateAzureAssignedIdentityStatus(assignedIdentity *aadpodid.AzureAssignedIdentity, status string) (err error) {
klog.Infof("updating AzureAssignedIdentity %s/%s status to %s", assignedIdentity.Namespace, assignedIdentity.Name, status)
defer func() {
if err != nil {
merr := c.reporter.ReportKubernetesAPIOperationError(metrics.UpdateAzureAssignedIdentityStatusOperationName)
if merr != nil {
klog.Warningf("failed to report metrics, error: %+v", merr)
}
}
}()
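	// Build a JSON patch document; marshaled it looks like
	// [{"op":"replace","path":"/status/status","value":"<status>"}].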
ops := make([]patchStatusOps, 1)
ops[0].Op = "replace"
ops[0].Path = "/status/status"
ops[0].Value = status
patchBytes, err := json.Marshal(ops)
if err != nil {
return err
}
begin := time.Now()
err = c.rest.
Patch(types.JSONPatchType).
Namespace(assignedIdentity.Namespace).
Resource(aadpodid.AzureAssignedIDResource).
Name(assignedIdentity.Name).
Body(patchBytes).
Do(context.TODO()).
Error()
klog.V(5).Infof("patch of %s took: %v", assignedIdentity.Name, time.Since(begin))
return err
}
func getMapKey(ns, name string) string {
return strings.Join([]string{ns, name}, "/")
}
func removeFinalizer(assignedID *aadpodv1.AzureAssignedIdentity) {
assignedID.SetFinalizers(removeString(finalizerName, assignedID.GetFinalizers()))
}
func hasFinalizer(assignedID *aadpodv1.AzureAssignedIdentity) bool {
return containsString(finalizerName, assignedID.GetFinalizers())
}
func containsString(s string, items []string) bool {
for _, item := range items {
if item == s {
return true
}
}
return false
}
func removeString(s string, items []string) []string {
var rval []string
for _, item := range items {
if item != s {
rval = append(rval, item)
}
}
return rval
} | metrics.AssignedIdentityDeletionDurationM.M(metrics.SinceInSeconds(begin)))
}()
var res aadpodv1.AzureAssignedIdentity |
Persons.js | import React, { Component } from 'react';
import Person from './Person/Person';
class | extends Component {
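  // Shallow reference check: skip re-rendering unless the persons array
  // itself was replaced.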
shouldComponentUpdate(nextProps, nextState){
return (nextProps.persons !== this.props.persons);
}
render() {
    console.log('Persons rendering..');
return this.props.persons.map(
      (item, index) => (
        <Person
          click={this.props.clicked.bind(this.props, index)}
          name={item.name}
          age={item.age}
          key={item.id}
          changed={this.props.changed.bind(this.props, item.id)}
        />
      )
)
}
}
export default Persons; | Persons |
ps6_recursion.py | # 6.00x Problem Set 6
#
# Part 2 - RECURSION
#
# Problem 3: Recursive String Reversal
#
def reverseString(aStr):
"""
Given a string, recursively returns a reversed copy of the string.
For example, if the string is 'abc', the function returns 'cba'.
The only string operations you are allowed to use are indexing,
slicing, and concatenation.
aStr: a string
returns: a reversed string
"""
if len(aStr) == 0:
return ''
return aStr[-1] + reverseString(aStr[0:-1])
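# Example trace: reverseString('abc') == 'c' + reverseString('ab')
#                                     == 'c' + 'b' + reverseString('a') == 'cba'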
#
# Problem 4: X-ian
#
def x_ian(x, word):
|
#
# Problem 5: Typewriter
#
def insertNewlines(text, lineLength):
"""
Given text and a desired line length, wrap the text as a typewriter would.
Insert a newline character ("\n") after each word that reaches or exceeds
the desired line length.
text: a string containing the text to wrap.
    lineLength: the number of characters to include on a line before wrapping
the next word.
returns: a string, with newline characters inserted appropriately.
"""
if len(text) < lineLength:
return text
chunk = text[0:lineLength]
return chunk + "\n" + insertNewlines(text[lineLength:], lineLength) | """
Given a string x, returns True if all the letters in x are
contained in word in the same order as they appear in x.
>>> x_ian('eric', 'meritocracy')
True
>>> x_ian('eric', 'cerium')
False
>>> x_ian('john', 'mahjong')
False
x: a string
word: a string
returns: True if word is x_ian, False otherwise
"""
    # if we've consumed all of x, every character of x was found
    # in word in order
if len(x) == 0:
return True
# check if the next character can be found in rest of string
pos_x_i = word.find(x[0])
if pos_x_i == -1:
return False
    return x_ian(x[1:], word[pos_x_i + 1:]) |
lib.rs | mod utils;
extern crate web_sys;
use std::fmt;
use wasm_bindgen::prelude::*;
use web_sys::console;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// A macro to provide `println!(..)`-style syntax for `console.log` logging.
macro_rules! log {
( $( $t:tt )* ) => {
web_sys::console::log_1(&format!( $( $t )* ).into());
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Cell {
Dead = 0,
Alive = 1,
}
#[wasm_bindgen]
pub struct Universe {
width: u32,
height: u32,
cells: Vec<Cell>,
}
impl Cell {
fn toggle(&mut self) {
*self = match *self { | }
}
/// Public methods, exported to JavaScript
#[wasm_bindgen]
impl Universe {
pub fn width(&self) -> u32 {
self.width
}
/// Set the width of the universe.
///
/// Resets all cells to the dead state.
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells = (0..width * self.height).map(|_i| Cell::Dead).collect();
}
pub fn height(&self) -> u32 {
        self.height
}
/// Set the height of the universe.
///
/// Resets all cells to the dead state.
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells = (0..self.width * height).map(|_i| Cell::Dead).collect();
}
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn toggle_cell(&mut self, row: u32, column: u32) {
let idx = self.get_index(row, column);
self.cells[idx].toggle();
}
pub fn tick(&mut self) {
let _timer = Timer::new("Universe::tick");
let mut next = {
let _timer = Timer::new("allocate next cells");
self.cells.clone()
};
{
let _timer = Timer::new("new generation");
for row in 0..self.height {
for col in 0..self.width {
let idx = self.get_index(row, col);
let cell = self.cells[idx];
let live_neighbors = self.live_neighbor_count(row, col);
let next_cell = match (cell, live_neighbors) {
// Rule 1: Any live cell with fewer than two live neighbours
// dies, as if caused by underpopulation.
(Cell::Alive, x) if x < 2 => Cell::Dead,
// Rule 2: Any live cell with two or three live neighbours
// lives on to the next generation.
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// Rule 3: Any live cell with more than three live
// neighbours dies, as if by overpopulation.
(Cell::Alive, x) if x > 3 => Cell::Dead,
// Rule 4: Any dead cell with exactly three live neighbours
// becomes a live cell, as if by reproduction.
(Cell::Dead, 3) => Cell::Alive,
// All other cells remain in the same state.
(otherwise, _) => otherwise,
};
next[idx] = next_cell;
}
}
}
let _timer = Timer::new("free old cells");
self.cells = next;
}
pub fn new() -> Universe {
utils::set_panic_hook();
let width = 128;
let height = 128;
let cells = (0..width * height)
.map(|i| {
if i % 2 == 0 || i % 7 == 0 {
Cell::Alive
} else {
Cell::Dead
}
})
.collect();
Universe {
width,
height,
cells,
}
}
pub fn render(&self) -> String {
self.to_string()
}
}
impl Universe {
/// Get the dead and alive values of the entire universe.
pub fn get_cells(&self) -> &[Cell] {
&self.cells
}
/// Set cells to be alive in a universe by passing the row and column
/// of each cell as an array.
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
}
}
}
/// Private methods
impl Universe {
fn get_index(&self, row: u32, column: u32) -> usize {
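        // Cells are stored in row-major order, so with the default width of
        // 128, (row 2, column 3) maps to index 2 * 128 + 3 = 259.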
(row * self.width + column) as usize
}
fn live_neighbor_count(&self, row: u32, column: u32) -> u8 {
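        // Neighbor coordinates wrap around the edges, so the universe behaves
        // as a torus: row 0's northern neighbor is the bottom row.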
let mut count = 0;
let north = if row == 0 { self.height - 1 } else { row - 1 };
let south = if row == self.height - 1 { 0 } else { row + 1 };
let west = if column == 0 {
self.width - 1
} else {
column - 1
};
let east = if column == self.width - 1 {
0
} else {
column + 1
};
let nw = self.get_index(north, west);
count += self.cells[nw] as u8;
let n = self.get_index(north, column);
count += self.cells[n] as u8;
let ne = self.get_index(north, east);
count += self.cells[ne] as u8;
let w = self.get_index(row, west);
count += self.cells[w] as u8;
let e = self.get_index(row, east);
count += self.cells[e] as u8;
let sw = self.get_index(south, west);
count += self.cells[sw] as u8;
let s = self.get_index(south, column);
count += self.cells[s] as u8;
let se = self.get_index(south, east);
count += self.cells[se] as u8;
count
}
}
impl fmt::Display for Universe {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for line in self.cells.as_slice().chunks(self.width as usize) {
for &cell in line {
let symbol = if cell == Cell::Dead { '◻' } else { '◼' };
write!(f, "{}", symbol)?;
}
write!(f, "\n")?;
}
Ok(())
}
}
pub struct Timer<'a> {
name: &'a str,
}
impl<'a> Timer<'a> {
pub fn new(name: &'a str) -> Timer<'a> {
console::time_with_label(name);
Timer { name }
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
console::time_end_with_label(self.name);
}
} | Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
}; |
loader.go | package loader
import (
"errors"
"github.com/projectdiscovery/gologger"
"github.com/projectdiscovery/nuclei/v2/pkg/catalog"
"github.com/projectdiscovery/nuclei/v2/pkg/catalog/loader/filter"
"github.com/projectdiscovery/nuclei/v2/pkg/model/types/severity"
"github.com/projectdiscovery/nuclei/v2/pkg/parsers"
"github.com/projectdiscovery/nuclei/v2/pkg/protocols"
"github.com/projectdiscovery/nuclei/v2/pkg/templates"
)
// Config contains the configuration options for the loader
type Config struct {
Templates []string
Workflows []string
ExcludeTemplates []string
IncludeTemplates []string
Tags []string
ExcludeTags []string
Authors []string
Severities severity.Severities
IncludeTags []string
Catalog *catalog.Catalog
ExecutorOptions protocols.ExecuterOptions
TemplatesDirectory string
}
// Store is a storage for loaded nuclei templates
type Store struct {
tagFilter *filter.TagFilter
pathFilter *filter.PathFilter
config *Config
finalTemplates []string
templates []*templates.Template
workflows []*templates.Template
preprocessor templates.Preprocessor
}
// New creates a new template store based on provided configuration
func New(config *Config) (*Store, error) {
// Create a tag filter based on provided configuration
store := &Store{
config: config,
tagFilter: filter.New(&filter.Config{
Tags: config.Tags,
ExcludeTags: config.ExcludeTags,
Authors: config.Authors,
Severities: config.Severities,
IncludeTags: config.IncludeTags,
}),
pathFilter: filter.NewPathFilter(&filter.PathFilterConfig{
IncludedTemplates: config.IncludeTemplates,
ExcludedTemplates: config.ExcludeTemplates,
}, config.Catalog),
}
	// Handle the case with no templates or workflows, where we use the base directory
if len(config.Templates) == 0 && len(config.Workflows) == 0 {
config.Templates = append(config.Templates, config.TemplatesDirectory)
}
store.finalTemplates = append(store.finalTemplates, config.Templates...)
return store, nil
}
// Templates returns all the templates in the store
func (store *Store) Templates() []*templates.Template {
return store.templates
}
// Workflows returns all the workflows in the store
func (store *Store) Workflows() []*templates.Template {
return store.workflows
}
// RegisterPreprocessor allows a custom preprocessor to be passed to the store to run against templates
func (store *Store) RegisterPreprocessor(preprocessor templates.Preprocessor) {
store.preprocessor = preprocessor
}
// Load loads all the templates from a store, performs filtering and returns
// the complete compiled templates for a nuclei execution configuration.
func (store *Store) Load() {
store.templates = store.LoadTemplates(store.finalTemplates)
store.workflows = store.LoadWorkflows(store.config.Workflows)
}
// ValidateTemplates takes a list of templates and validates them
// erroring out on discovering any faulty templates.
func (store *Store) ValidateTemplates(templatesList, workflowsList []string) error {
templatePaths := store.config.Catalog.GetTemplatesPath(templatesList)
workflowPaths := store.config.Catalog.GetTemplatesPath(workflowsList)
filteredTemplatePaths := store.pathFilter.Match(templatePaths)
filteredWorkflowPaths := store.pathFilter.Match(workflowPaths)
if areTemplatesValid(store, filteredTemplatePaths) && areWorkflowsValid(store, filteredWorkflowPaths) {
return nil
}
return errors.New("an error occurred during templates validation")
}
func areWorkflowsValid(store *Store, filteredWorkflowPaths map[string]struct{}) bool {
return areWorkflowOrTemplatesValid(store, filteredWorkflowPaths, true, func(templatePath string, tagFilter *filter.TagFilter) (bool, error) {
return parsers.LoadWorkflow(templatePath, store.tagFilter)
})
}
func areTemplatesValid(store *Store, filteredTemplatePaths map[string]struct{}) bool {
return areWorkflowOrTemplatesValid(store, filteredTemplatePaths, false, func(templatePath string, tagFilter *filter.TagFilter) (bool, error) {
return parsers.LoadTemplate(templatePath, store.tagFilter, nil)
})
}
func areWorkflowOrTemplatesValid(store *Store, filteredTemplatePaths map[string]struct{}, isWorkflow bool, load func(templatePath string, tagFilter *filter.TagFilter) (bool, error)) bool {
areTemplatesValid := true
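	// Walk every filtered path: one invalid template marks the whole set
	// invalid, but the loop keeps going so all errors are reported.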
for templatePath := range filteredTemplatePaths {
if _, err := load(templatePath, store.tagFilter); err != nil {
if isParsingError("Error occurred loading template %s: %s\n", templatePath, err) {
areTemplatesValid = false
continue
}
}
template, err := templates.Parse(templatePath, store.preprocessor, store.config.ExecutorOptions)
if err != nil {
if isParsingError("Error occurred parsing template %s: %s\n", templatePath, err) {
areTemplatesValid = false
}
} else {
if !isWorkflow && len(template.Workflows) > 0 {
				continue
}
}
}
return areTemplatesValid
}
func isParsingError(message string, template string, err error) bool {
if err == templates.ErrCreateTemplateExecutor {
return false
}
if err == filter.ErrExcluded {
return false
}
gologger.Error().Msgf(message, template, err)
return true
}
// LoadTemplates takes a list of templates and returns paths for them
func (store *Store) LoadTemplates(templatesList []string) []*templates.Template {
includedTemplates := store.config.Catalog.GetTemplatesPath(templatesList)
templatePathMap := store.pathFilter.Match(includedTemplates)
loadedTemplates := make([]*templates.Template, 0, len(templatePathMap))
for templatePath := range templatePathMap {
loaded, err := parsers.LoadTemplate(templatePath, store.tagFilter, nil)
if err != nil {
gologger.Warning().Msgf("Could not load template %s: %s\n", templatePath, err)
}
if loaded {
parsed, err := templates.Parse(templatePath, store.preprocessor, store.config.ExecutorOptions)
if err != nil {
gologger.Warning().Msgf("Could not parse template %s: %s\n", templatePath, err)
} else if parsed != nil {
loadedTemplates = append(loadedTemplates, parsed)
}
}
}
return loadedTemplates
}
// LoadWorkflows takes a list of workflows and returns paths for them
func (store *Store) LoadWorkflows(workflowsList []string) []*templates.Template {
includedWorkflows := store.config.Catalog.GetTemplatesPath(workflowsList)
workflowPathMap := store.pathFilter.Match(includedWorkflows)
loadedWorkflows := make([]*templates.Template, 0, len(workflowPathMap))
for workflowPath := range workflowPathMap {
loaded, err := parsers.LoadWorkflow(workflowPath, store.tagFilter)
if err != nil {
gologger.Warning().Msgf("Could not load workflow %s: %s\n", workflowPath, err)
}
if loaded |
}
return loadedWorkflows
}
| {
parsed, err := templates.Parse(workflowPath, store.preprocessor, store.config.ExecutorOptions)
if err != nil {
gologger.Warning().Msgf("Could not parse workflow %s: %s\n", workflowPath, err)
} else if parsed != nil {
loadedWorkflows = append(loadedWorkflows, parsed)
}
} |
gr-diff-builder.ts | /**
* @license
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
ContentLoadNeededEventDetail,
DiffContextExpandedExternalDetail,
LineNumberEventDetail,
MovedLinkClickedEventDetail,
RenderPreferences,
} from '../../../api/diff';
import {getBaseUrl} from '../../../utils/url-util';
import {GrDiffLine, GrDiffLineType, LineNumber} from '../gr-diff/gr-diff-line';
import {GrDiffGroup, GrDiffGroupType} from '../gr-diff/gr-diff-group';
import '../gr-context-controls/gr-context-controls';
import {
GrContextControls,
GrContextControlsShowConfig,
} from '../gr-context-controls/gr-context-controls';
import {BlameInfo} from '../../../types/common';
import {DiffInfo, DiffPreferencesInfo} from '../../../types/diff';
import {DiffViewMode, Side} from '../../../constants/constants';
import {DiffLayer} from '../../../types/types';
/**
* In JS, unicode code points above 0xFFFF occupy two elements of a string.
 * For example, '𐀏'.length is 2. The two-element encoding of such a code
 * point is called a surrogate pair.
*
* This regex segments a string along tabs ('\t') and surrogate pairs, since
* these are two cases where '1 char' does not automatically imply '1 column'.
*
* TODO: For human languages whose orthographies use combining marks, this
* approach won't correctly identify the grapheme boundaries. In those cases,
* a grapheme consists of multiple code points that should count as only one
* character against the column limit. Getting that correct (if it's desired)
* is probably beyond the limits of a regex, but there are nonstandard APIs to
* do this, and proposed (but, as of Nov 2017, unimplemented) standard APIs.
*
* Further reading:
* On Unicode in JS: https://mathiasbynens.be/notes/javascript-unicode
* Graphemes: http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries
* A proposed JS API: https://github.com/tc39/proposal-intl-segmenter
*/
const REGEX_TAB_OR_SURROGATE_PAIR = /\t|[\uD800-\uDBFF][\uDC00-\uDFFF]/;
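// For example, 'a\tb'.split(REGEX_TAB_OR_SURROGATE_PAIR) yields ['a', 'b']:
// the matched tab (or surrogate pair) is consumed by the split and re-handled
// explicitly in _formatText, which tracks column width itself.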
export interface DiffContextExpandedEventDetail
extends DiffContextExpandedExternalDetail {
groups: GrDiffGroup[];
section: HTMLElement;
numLines: number;
}
declare global {
interface HTMLElementEventMap {
'diff-context-expanded': CustomEvent<DiffContextExpandedEventDetail>;
'content-load-needed': CustomEvent<ContentLoadNeededEventDetail>;
}
}
export abstract class GrDiffBuilder {
private readonly _diff: DiffInfo;
private readonly _numLinesLeft: number;
private readonly _prefs: DiffPreferencesInfo;
private readonly _renderPrefs?: RenderPreferences;
protected readonly _outputEl: HTMLElement;
readonly groups: GrDiffGroup[];
private blameInfo: BlameInfo[] | null;
private readonly _layerUpdateListener: (
start: LineNumber,
end: LineNumber,
side: Side
) => void;
constructor(
diff: DiffInfo,
prefs: DiffPreferencesInfo,
outputEl: HTMLElement,
readonly layers: DiffLayer[] = [],
renderPrefs?: RenderPreferences
) {
this._diff = diff;
this._numLinesLeft = this._diff.content
? this._diff.content.reduce((sum, chunk) => {
const left = chunk.a || chunk.ab;
return sum + (left?.length || chunk.skip || 0);
}, 0)
: 0;
this._prefs = prefs;
this._renderPrefs = renderPrefs;
this._outputEl = outputEl;
this.groups = [];
this.blameInfo = null;
if (isNaN(prefs.tab_size) || prefs.tab_size <= 0) {
throw Error('Invalid tab size from preferences.');
}
if (isNaN(prefs.line_length) || prefs.line_length <= 0) {
throw Error('Invalid line length from preferences.');
}
this._layerUpdateListener = (
start: LineNumber,
end: LineNumber,
side: Side
) => this._handleLayerUpdate(start, end, side);
for (const layer of this.layers) {
if (layer.addListener) {
layer.addListener(this._layerUpdateListener);
}
}
}
clear() {
for (const layer of this.layers) {
if (layer.removeListener) {
layer.removeListener(this._layerUpdateListener);
}
}
}
// TODO(TS): Convert to enum.
static readonly GroupType = {
ADDED: 'b',
BOTH: 'ab',
REMOVED: 'a',
};
// TODO(TS): Convert to enum.
static readonly Highlights = {
ADDED: 'edit_b',
REMOVED: 'edit_a',
};
abstract addColumns(outputEl: HTMLElement, fontSize: number): void;
abstract buildSectionElement(group: GrDiffGroup): HTMLElement;
emitGroup(group: GrDiffGroup, beforeSection: HTMLElement | null) {
const element = this.buildSectionElement(group);
this._outputEl.insertBefore(element, beforeSection);
group.element = element;
}
getGroupsByLineRange(
startLine: LineNumber,
endLine: LineNumber,
side?: Side
) {
const groups = [];
for (let i = 0; i < this.groups.length; i++) {
const group = this.groups[i];
if (group.lines.length === 0) {
continue;
}
let groupStartLine = 0;
let groupEndLine = 0;
if (side) {
const range =
side === Side.LEFT ? group.lineRange.left : group.lineRange.right;
groupStartLine = range.start_line;
groupEndLine = range.end_line;
}
if (groupStartLine === 0) {
// Line was removed or added.
groupStartLine = groupEndLine;
}
if (groupEndLine === 0) {
// Line was removed or added.
groupEndLine = groupStartLine;
}
if (startLine <= groupEndLine && endLine >= groupStartLine) {
groups.push(group);
}
}
return groups;
}
getContentTdByLine(
lineNumber: LineNumber,
side?: Side,
root: Element = this._outputEl
): Element | null {
const sideSelector: string = side ? `.${side}` : '';
return root.querySelector(
`td.lineNum[data-value="${lineNumber}"]${sideSelector} ~ td.content`
);
}
getContentByLine(
lineNumber: LineNumber,
side?: Side,
root?: HTMLElement
): HTMLElement | null {
const td = this.getContentTdByLine(lineNumber, side, root);
return td ? td.querySelector('.contentText') : null;
}
/**
* Find line elements or line objects by a range of line numbers and a side.
*
* @param start The first line number
* @param end The last line number
* @param side The side of the range. Either 'left' or 'right'.
* @param out_lines The output list of line objects. Use null if not desired.
* @param out_elements The output list of line elements. Use null if not
* desired.
*/
findLinesByRange(
start: LineNumber,
end: LineNumber,
side: Side,
out_lines: GrDiffLine[] | null,
out_elements: HTMLElement[] | null
) {
const groups = this.getGroupsByLineRange(start, end, side);
for (const group of groups) {
let content: HTMLElement | null = null;
for (const line of group.lines) {
if (
(side === 'left' && line.type === GrDiffLineType.ADD) ||
(side === 'right' && line.type === GrDiffLineType.REMOVE)
) {
continue;
}
const lineNumber =
side === 'left' ? line.beforeNumber : line.afterNumber;
if (lineNumber < start || lineNumber > end) {
continue;
}
if (out_lines) {
out_lines.push(line);
}
if (out_elements) {
if (content) {
content = this._getNextContentOnSide(content, side);
} else {
content = this.getContentByLine(lineNumber, side, group.element);
}
if (content) {
out_elements.push(content);
}
}
}
}
}
/**
* Re-renders the DIV.contentText elements for the given side and range of
* diff content.
*/
_renderContentByRange(start: LineNumber, end: LineNumber, side: Side) {
const lines: GrDiffLine[] = [];
const elements: HTMLElement[] = [];
let line;
let el;
this.findLinesByRange(start, end, side, lines, elements);
for (let i = 0; i < lines.length; i++) {
line = lines[i];
el = elements[i];
if (!el || !el.parentElement) {
// Cannot re-render an element if it does not exist. This can happen
// if lines are collapsed and not visible on the page yet.
continue;
}
const lineNumberEl = this._getLineNumberEl(el, side);
el.parentElement.replaceChild(
this._createTextEl(lineNumberEl, line, side).firstChild!,
el
);
}
}
getSectionsByLineRange(
startLine: LineNumber,
endLine: LineNumber,
side: Side
) {
return this.getGroupsByLineRange(startLine, endLine, side).map(
group => group.element
);
}
_createContextControls(
section: HTMLElement,
contextGroups: GrDiffGroup[],
viewMode: DiffViewMode
) {
const leftStart = contextGroups[0].lineRange.left.start_line;
const leftEnd =
contextGroups[contextGroups.length - 1].lineRange.left.end_line;
const firstGroupIsSkipped = !!contextGroups[0].skip;
const lastGroupIsSkipped = !!contextGroups[contextGroups.length - 1].skip;
const containsWholeFile = this._numLinesLeft === leftEnd - leftStart + 1;
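    // Show an expand control above when collapsed lines precede the first
    // group (or the whole file is collapsed), and below when collapsed lines
    // remain after the last group; skipped chunks get no controls.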
const showAbove =
(leftStart > 1 && !firstGroupIsSkipped) || containsWholeFile;
const showBelow = leftEnd < this._numLinesLeft && !lastGroupIsSkipped;
if (showAbove) {
const paddingRow = this._createContextControlPaddingRow(viewMode);
paddingRow.classList.add('above');
section.appendChild(paddingRow);
}
section.appendChild(
this._createContextControlRow(
section,
contextGroups,
showAbove,
showBelow,
viewMode
)
);
if (showBelow) {
const paddingRow = this._createContextControlPaddingRow(viewMode);
paddingRow.classList.add('below');
section.appendChild(paddingRow);
}
}
/**
* Creates context controls. Buttons extend from the gap created by this
* method up or down into the area of code that they affect.
*/
_createContextControlRow(
section: HTMLElement,
contextGroups: GrDiffGroup[],
showAbove: boolean,
showBelow: boolean,
viewMode: DiffViewMode
): HTMLElement {
const row = this._createElement('tr', 'dividerRow');
let showConfig: GrContextControlsShowConfig;
if (showAbove && !showBelow) {
showConfig = 'above';
} else if (!showAbove && showBelow) {
showConfig = 'below';
} else {
// Note that !showAbove && !showBelow also intentionally creates
// "show-both". This means the file is completely collapsed, which is
// unusual, but at least happens in one test.
showConfig = 'both';
}
row.classList.add(`show-${showConfig}`);
row.appendChild(this._createBlameCell(0));
if (viewMode === DiffViewMode.SIDE_BY_SIDE) {
row.appendChild(this._createElement('td'));
}
const cell = this._createElement('td', 'dividerCell');
cell.setAttribute('colspan', '3');
row.appendChild(cell);
const contextControls = this._createElement(
'gr-context-controls'
) as GrContextControls;
contextControls.diff = this._diff;
contextControls.renderPreferences = this._renderPrefs;
contextControls.section = section;
contextControls.contextGroups = contextGroups;
contextControls.showConfig = showConfig;
cell.appendChild(contextControls);
return row;
}
/**
* Creates a table row to serve as padding between code and context controls.
* Blame column, line gutters, and content area will continue visually, but
* context controls can render over this background to map more clearly to
* the area of code they expand.
*/
_createContextControlPaddingRow(viewMode: DiffViewMode) {
const row = this._createElement('tr', 'contextBackground');
if (viewMode === DiffViewMode.SIDE_BY_SIDE) {
row.classList.add('side-by-side');
row.setAttribute('left-type', GrDiffGroupType.CONTEXT_CONTROL);
row.setAttribute('right-type', GrDiffGroupType.CONTEXT_CONTROL);
} else {
row.classList.add('unified');
}
row.appendChild(this._createBlameCell(0));
row.appendChild(this._createElement('td', 'contextLineNum'));
if (viewMode === DiffViewMode.SIDE_BY_SIDE) {
row.appendChild(this._createElement('td'));
}
row.appendChild(this._createElement('td', 'contextLineNum'));
row.appendChild(this._createElement('td'));
return row;
}
_createLineEl(
line: GrDiffLine,
number: LineNumber,
type: GrDiffLineType,
side: Side
) {
const td = this._createElement('td');
td.classList.add(side);
if (line.type === GrDiffLineType.BLANK) {
return td;
}
if (line.type === GrDiffLineType.BOTH || line.type === type) {
td.classList.add('lineNum');
td.dataset['value'] = number.toString();
if (
((this._prefs.show_file_comment_button === false ||
this._renderPrefs?.show_file_comment_button === false) &&
number === 'FILE') ||
number === 'LOST'
) {
return td;
}
const button = this._createElement('button');
td.appendChild(button);
button.tabIndex = -1;
button.classList.add('lineNumButton');
button.classList.add(side);
button.dataset['value'] = number.toString();
button.textContent = number === 'FILE' ? 'File' : number.toString();
if (number === 'FILE') {
button.setAttribute('aria-label', 'Add file comment');
}
// Add aria-labels for valid line numbers.
// For unified diff, this method will be called with number set to 0 for
// the empty line number column for added/removed lines. This should not
// be announced to the screenreader.
if (number > 0) {
if (line.type === GrDiffLineType.REMOVE) {
button.setAttribute('aria-label', `${number} removed`);
} else if (line.type === GrDiffLineType.ADD) {
button.setAttribute('aria-label', `${number} added`);
}
}
button.addEventListener('mouseenter', () => {
button.dispatchEvent(
new CustomEvent<LineNumberEventDetail>('line-number-mouse-enter', {
detail: {
lineNum: number,
side,
},
composed: true,
bubbles: true,
})
);
});
button.addEventListener('mouseleave', () => {
button.dispatchEvent(
new CustomEvent<LineNumberEventDetail>('line-number-mouse-leave', {
detail: {
lineNum: number,
side,
},
composed: true,
bubbles: true,
})
);
});
}
return td;
}
_createTextEl(
lineNumberEl: HTMLElement | null,
line: GrDiffLine,
side?: Side
) {
const td = this._createElement('td');
if (line.type !== GrDiffLineType.BLANK) {
td.classList.add('content');
}
    // If intraline info is not available, the entire line will be
    // considered changed and marked with the dark red / green color
if (!line.hasIntralineInfo) {
td.classList.add('no-intraline-info');
}
td.classList.add(line.type);
if (line.beforeNumber !== 'FILE' && line.beforeNumber !== 'LOST') {
const lineLimit = !this._prefs.line_wrapping
? this._prefs.line_length
: Infinity;
const contentText = this._formatText(
line.text,
this._prefs.tab_size,
lineLimit
);
if (side) {
contentText.setAttribute('data-side', side);
}
if (lineNumberEl && side) {
for (const layer of this.layers) {
if (typeof layer.annotate === 'function') {
layer.annotate(contentText, lineNumberEl, line, side);
}
}
} else {
console.error('lineNumberEl or side not set, skipping layer.annotate');
}
td.appendChild(contentText);
} else if (line.beforeNumber === 'FILE') td.classList.add('file');
else if (line.beforeNumber === 'LOST') td.classList.add('lost');
return td;
}
/**
* Returns a 'div' element containing the supplied |text| as its innerText,
* with '\t' characters expanded to a width determined by |tabSize|, and the
* text wrapped at column |lineLimit|, which may be Infinity if no wrapping is
* desired.
*
* @param text The text to be formatted.
* @param tabSize The width of each tab stop.
* @param lineLimit The column after which to wrap lines.
*/
_formatText(text: string, tabSize: number, lineLimit: number): HTMLElement {
const contentText = this._createElement('div', 'contentText');
let columnPos = 0;
let textOffset = 0;
for (const segment of text.split(REGEX_TAB_OR_SURROGATE_PAIR)) {
if (segment) {
// |segment| contains only normal characters. If |segment| doesn't fit
// entirely on the current line, append chunks of |segment| followed by
// line breaks.
let rowStart = 0;
let rowEnd = lineLimit - columnPos;
while (rowEnd < segment.length) {
contentText.appendChild(
document.createTextNode(segment.substring(rowStart, rowEnd))
);
contentText.appendChild(this._createElement('span', 'br'));
columnPos = 0;
rowStart = rowEnd;
rowEnd += lineLimit;
}
// Append the last part of |segment|, which fits on the current line.
contentText.appendChild(
document.createTextNode(segment.substring(rowStart))
);
columnPos += segment.length - rowStart;
textOffset += segment.length;
}
if (textOffset < text.length) {
// Handle the special character at |textOffset|.
if (text.startsWith('\t', textOffset)) {
// Append a single '\t' character.
let effectiveTabSize = tabSize - (columnPos % tabSize);
if (columnPos + effectiveTabSize > lineLimit) {
contentText.appendChild(this._createElement('span', 'br'));
columnPos = 0;
effectiveTabSize = tabSize;
}
contentText.appendChild(this._getTabWrapper(effectiveTabSize));
columnPos += effectiveTabSize;
textOffset++;
} else {
// Append a single surrogate pair.
if (columnPos >= lineLimit) {
contentText.appendChild(this._createElement('span', 'br'));
columnPos = 0;
}
contentText.appendChild(
document.createTextNode(text.substring(textOffset, textOffset + 2))
);
textOffset += 2;
columnPos += 1;
}
}
}
return contentText;
}
/**
* Returns a <span> element holding a '\t' character, that will visually
* occupy |tabSize| many columns.
*
* @param tabSize The effective size of this tab stop.
*/
_getTabWrapper(tabSize: number): HTMLElement {
// Force this to be a number to prevent arbitrary injection.
const result = this._createElement('span', 'tab');
result.setAttribute(
'style',
`tab-size: ${tabSize}; -moz-tab-size: ${tabSize};`
);
result.innerText = '\t';
return result;
}
_createElement(tagName: string, classStr?: string): HTMLElement {
const el = document.createElement(tagName);
// When Shady DOM is being used, these classes are added to account for
// Polymer's polyfill behavior. In order to guarantee sufficient
// specificity within the CSS rules, these are added to every element.
// Since the Polymer DOM utility functions (which would do this
// automatically) are not being used for performance reasons, this is
// done manually.
el.classList.add('style-scope', 'gr-diff');
if (classStr) {
for (const className of classStr.split(' ')) {
el.classList.add(className);
}
}
return el;
}
_handleLayerUpdate(start: LineNumber, end: LineNumber, side: Side) {
this._renderContentByRange(start, end, side);
}
/**
* Finds the next DIV.contentText element following the given element, and on
* the same side. Will only search within a group.
*/
abstract _getNextContentOnSide(
content: HTMLElement,
side: Side
): HTMLElement | null;
/**
* Gets configuration for creating move controls for chunks marked with
* dueToMove
*/
abstract _getMoveControlsConfig(): {
numberOfCells: number;
movedOutIndex: number;
movedInIndex: number;
};
/**
* Determines whether the given group is either totally an addition or totally
* a removal.
*/
_isTotal(group: GrDiffGroup): boolean {
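    // A delta group made purely of additions or purely of removals (but not
    // an empty one) counts as "total".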
return (
group.type === GrDiffGroupType.DELTA &&
(!group.adds.length || !group.removes.length) &&
!(!group.adds.length && !group.removes.length)
);
}
/**
* Set the blame information for the diff. For any already-rendered line,
* re-render its blame cell content.
*/
setBlame(blame: BlameInfo[] | null) {
this.blameInfo = blame;
if (!blame) return;
// TODO(wyatta): make this loop asynchronous.
for (const commit of blame) {
for (const range of commit.ranges) {
for (let i = range.start; i <= range.end; i++) {
// TODO(wyatta): this query is expensive, but, when traversing a
// range, the lines are consecutive, and given the previous blame
// cell, the next one can be reached cheaply.
const el = this._getBlameByLineNum(i);
if (!el) {
continue;
}
// Remove the element's children (if any).
while (el.hasChildNodes()) {
el.removeChild(el.lastChild!);
}
const blame = this._getBlameForBaseLine(i, commit);
if (blame) el.appendChild(blame);
}
}
}
}
_createMovedLineAnchor(line: number, side: Side) {
const anchor = this._createElementWithText('a', `${line}`);
// href is not actually used but important for Screen Readers
anchor.setAttribute('href', `#${line}`);
anchor.addEventListener('click', e => {
e.preventDefault();
anchor.dispatchEvent(
new CustomEvent<MovedLinkClickedEventDetail>('moved-link-clicked', {
detail: {
lineNum: line,
side,
},
composed: true,
bubbles: true,
})
);
});
return anchor;
}
_createElementWithText(tagName: string, textContent: string) {
const element = this._createElement(tagName);
element.textContent = textContent;
return element;
}
_createMoveDescriptionDiv(movedIn: boolean, group: GrDiffGroup) {
const div = this._createElement('div');
if (group.moveDetails?.range) {
const {changed, range} = group.moveDetails;
const otherSide = movedIn ? Side.LEFT : Side.RIGHT;
const andChangedLabel = changed ? 'and changed ' : '';
const direction = movedIn ? 'from' : 'to';
const textLabel = `Moved ${andChangedLabel}${direction} lines `;
div.appendChild(this._createElementWithText('span', textLabel));
div.appendChild(this._createMovedLineAnchor(range.start, otherSide));
div.appendChild(this._createElementWithText('span', ' - '));
div.appendChild(this._createMovedLineAnchor(range.end, otherSide));
} else {
div.appendChild(
this._createElementWithText('span', movedIn ? 'Moved in' : 'Moved out')
);
}
return div;
}
_buildMoveControls(group: GrDiffGroup) {
const movedIn = group.adds.length > 0;
const {
numberOfCells,
movedOutIndex,
movedInIndex,
} = this._getMoveControlsConfig();
let controlsClass;
let descriptionIndex;
const descriptionTextDiv = this._createMoveDescriptionDiv(movedIn, group);
if (movedIn) {
controlsClass = 'movedIn';
descriptionIndex = movedInIndex;
} else {
controlsClass = 'movedOut';
descriptionIndex = movedOutIndex;
}
const controls = this._createElement('tr', `moveControls ${controlsClass}`);
const cells = [...Array(numberOfCells).keys()].map(() =>
this._createElement('td')
);
const moveRangeHeader = this._createElement('gr-range-header');
moveRangeHeader.setAttribute('icon', 'gr-icons:move-item');
moveRangeHeader.appendChild(descriptionTextDiv);
cells[descriptionIndex].classList.add('moveHeader');
cells[descriptionIndex].appendChild(moveRangeHeader);
cells.forEach(c => {
controls.appendChild(c);
});
return controls;
}
/**
* Find the blame cell for a given line number.
*/
_getBlameByLineNum(lineNum: number): Element | null {
return this._outputEl.querySelector(
`td.blame[data-line-number="${lineNum}"]`
);
}
/**
* Given a base line number, return the commit containing that line in the
* current set of blame information. If no blame information has been
* provided, null is returned.
*
* @return The commit information.
*/
_getBlameCommitForBaseLine(lineNum: LineNumber) {
if (!this.blameInfo) {
return null;
}
for (const blameCommit of this.blameInfo) {
for (const range of blameCommit.ranges) {
if (range.start <= lineNum && range.end >= lineNum) {
return blameCommit;
}
}
}
return null;
}
/**
* Given the number of a base line, get the content for the blame cell of that
* line. If there is no blame information for that line, returns null.
*
* @param commit Optionally provide the commit object, so that
* it does not need to be searched.
*/
_getBlameForBaseLine(
lineNum: LineNumber,
commit: BlameInfo | null = this._getBlameCommitForBaseLine(lineNum)
): HTMLElement | null {
if (!commit) {
return null;
}
const isStartOfRange = commit.ranges.some(r => r.start === lineNum);
const date = new Date(commit.time * 1000).toLocaleDateString();
const blameNode = this._createElement(
'span',
isStartOfRange ? 'startOfRange' : ''
);
const shaNode = this._createElement('a', 'blameDate');
shaNode.innerText = `${date}`;
shaNode.setAttribute('href', `${getBaseUrl()}/q/${commit.id}`);
blameNode.appendChild(shaNode);
const shortName = commit.author.split(' ')[0];
const authorNode = this._createElement('span', 'blameAuthor');
authorNode.innerText = ` ${shortName}`;
blameNode.appendChild(authorNode);
const hoverCardFragment = this._createElement('span', 'blameHoverCard');
hoverCardFragment.innerText = `Commit ${commit.id}
Author: ${commit.author}
Date: ${date}
${commit.commit_msg}`;
const hovercard = this._createElement('gr-hovercard');
hovercard.appendChild(hoverCardFragment);
blameNode.appendChild(hovercard);
return blameNode;
}
/**
* Create a blame cell for the given base line. Blame information will be
* included in the cell if available.
*/
_createBlameCell(lineNumber: LineNumber): HTMLTableDataCellElement {
const blameTd = this._createElement(
'td',
'blame'
) as HTMLTableDataCellElement;
blameTd.setAttribute('data-line-number', lineNumber.toString());
if (lineNumber) { | const content = this._getBlameForBaseLine(lineNumber);
if (content) {
blameTd.appendChild(content);
}
}
return blameTd;
}
/**
* Finds the line number element given the content element by walking up the
* DOM tree to the diff row and then querying for a .lineNum element on the
* requested side.
*
* TODO(brohlfs): Consolidate this with getLineEl... methods in html file.
*/
_getLineNumberEl(content: HTMLElement, side: Side): HTMLElement | null {
let row: HTMLElement | null = content;
while (row && !row.classList.contains('diff-row')) row = row.parentElement;
return row ? (row.querySelector('.lineNum.' + side) as HTMLElement) : null;
}
} | |
keymember_3.rs | #[cfg(all(not(target_arch = "wasm32"), test))]
mod test;
use anyhow::*;
use liblumen_alloc::erts::exception;
use liblumen_alloc::erts::term::prelude::*;
use crate::runtime::context::*;
#[native_implemented::function(lists:keymember/3)]
pub fn | (key: Term, index: Term, tuple_list: Term) -> exception::Result<Term> {
let index = term_try_into_one_based_index(index)?;
match tuple_list.decode()? {
TypedTerm::Nil => Ok(false.into()),
TypedTerm::List(cons) => match cons.keyfind(index, key)? {
Some(_) => Ok(true.into()),
None => Ok(false.into()),
},
_ => Err(TypeError)
.context(format!("tuple_list ({}) is not a proper list", tuple_list))
.map_err(From::from),
}
}
| result |
inline.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let cx = ::ctxtkey.get().unwrap();
let tcx = match cx.maybe_typed {
core::Typed(ref tycx) => tycx,
core::NotTyped(_) => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(&**cx, tcx, def).map(|vec| {
vec.move_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean());
}
_ => {}
}
item
}).collect()
})
}
fn try_inline_def(cx: &core::DocContext,
tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
clean::TraitItem(build_external_trait(tcx, did))
}
def::DefFn(did, style) => {
// If this function is a tuple struct constructor, we just skip it
if csearch::get_tuple_struct_definition_if_ctor(&tcx.sess.cstore,
did).is_some() {
return None
}
record_extern_fqn(cx, did, clean::TypeFunction);
clean::FunctionItem(build_external_function(tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).move_iter());
clean::StructItem(build_struct(tcx, did))
}
def::DefTy(did) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).move_iter());
build_type(tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
def::DefVariant(..) => return Some(Vec::new()),
def::DefMod(did) => {
record_extern_fqn(cx, did, clean::TypeModule);
clean::ModuleItem(build_module(cx, tcx, did))
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(tcx, did, mtbl))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
attrs: load_attrs(tcx, did),
inner: inner,
visibility: Some(ast::Public),
stability: stability::lookup(tcx, did).clean(),
def_id: did,
});
Some(ret)
}
pub fn load_attrs(tcx: &ty::ctxt, did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.move_iter().map(|a| {
a.clean()
}));
});
attrs
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
pub fn record_extern_fqn(cx: &core::DocContext,
did: ast::DefId,
kind: clean::TypeKind) {
match cx.maybe_typed {
core::Typed(ref tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
core::NotTyped(..) => {}
}
}
pub fn build_external_trait(tcx: &ty::ctxt, did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean();
let provided = ty::provided_trait_methods(tcx, did);
let mut items = trait_items.move_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
clean::ProvidedMethod(trait_item)
} else {
clean::RequiredMethod(trait_item)
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
let bounds = trait_def.bounds.clean();
clean::Trait {
generics: (&def.generics, subst::TypeSpace).clean(),
items: items.collect(),
bounds: bounds,
}
}
fn build_external_function(tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
ty::ty_bare_fn(ref f) => (did, &f.sig).clean(),
_ => fail!("bad function"),
},
generics: (&t.generics, subst::FnSpace).clean(),
fn_style: style,
}
}
fn build_struct(tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
let fields = ty::lookup_struct_fields(tcx, did);
clean::Struct {
struct_type: match fields.as_slice() {
[] => doctree::Unit,
[ref f] if f.name == unnamed_field.name => doctree::Newtype,
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
generics: (&t.generics, subst::TypeSpace).clean(),
fields: fields.clean(),
fields_stripped: false,
}
}
fn build_type(tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, subst::TypeSpace).clean(),
variants_stripped: false,
variants: ty::enum_variants(tcx, edid).clean(),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
type_: t.ty.clean(),
generics: (&t.generics, subst::TypeSpace).clean(),
})
}
fn build_impls(cx: &core::DocContext,
tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().find(&did) {
None => {}
Some(i) => {
impls.extend(i.borrow().iter().map(|&did| { build_impl(cx, tcx, did) }));
        }
    }

    // If this is the first time we've inlined something from this crate, then
    // we inline *all* impls from the crate into this crate. Note that there's
// currently no way for us to filter this based on type, and we likely need
// many impls for a variety of reasons.
//
// Primarily, the impls will be used to populate the documentation for this
// type being inlined, but impls can also be used when generating
// documentation for primitives (no way to find those specifically).
if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
did.krate,
|def, _, _| {
populate_impls(cx, tcx, def, &mut impls)
});
fn populate_impls(cx: &core::DocContext,
tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
decoder::DlDef(def::DefMod(did)) => {
csearch::each_child_of_item(&tcx.sess.cstore,
did,
|def, _, _| {
populate_impls(cx, tcx, def, impls)
})
}
_ => {}
}
}
}
impls.move_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &core::DocContext,
tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
let associated_trait = csearch::get_impl_trait(tcx, did);
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
let trait_attrs = load_attrs(tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
}
None => {}
}
let attrs = load_attrs(tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
let impl_item = ty::impl_or_trait_item(tcx, did);
match impl_item {
ty::MethodTraitItem(method) => {
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
let mut item = method.clean();
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
}) => {
clean::MethodItem(clean::Method {
fn_style: fn_style,
decl: decl,
self_: self_,
generics: generics,
})
}
_ => fail!("not a tymethod"),
};
Some(item)
}
}
}).collect();
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean().map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::RegionBound => unreachable!(),
}
}),
for_: ty.ty.clean(),
generics: (&ty.generics, subst::TypeSpace).clean(),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(),
def_id: did,
});
fn is_doc_hidden(a: &clean::Attribute) -> bool {
match *a {
clean::List(ref name, ref inner) if name.as_slice() == "doc" => {
inner.iter().any(|a| {
match *a {
clean::Word(ref s) => s.as_slice() == "hidden",
_ => false,
}
})
}
_ => false
}
}
}
fn build_module(cx: &core::DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
items: items,
is_crate: false,
};
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
fn fill_in(cx: &core::DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
decoder::DlDef(def::DefForeignMod(did)) => {
fill_in(cx, tcx, did, items);
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
Some(i) => items.extend(i.move_iter()),
None => {}
}
}
decoder::DlDef(..) => {}
// All impls were inlined above
decoder::DlImpl(..) => {}
decoder::DlField => fail!("unimplemented field"),
}
});
}
}
fn build_static(tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
}

test_airconditioning.py

import pandas as pd
import csv
from builelib import airconditioning
import pytest
import json
import xlrd
### Test file names ###
# Dictionary mapping test names to file names
testcase_dict = {
"AHU_basic": "./tests/airconditioning/★空調設備テストケース一覧.xlsx",
}
def convert2number(x, default):
    '''
    Substitute a default value when the cell is blank.
    '''
if x == "":
x = default
else:
x = float(x)
return x
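# e.g. convert2number("", 0.7) -> 0.7, convert2number("1.5", 0.7) -> 1.5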
def read_testcasefile(filename):
    '''
    Read a test case definition file (Excel workbook).
    '''
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name("Sheet1")
testdata = [sheet.row_values(row) for row in range(sheet.nrows)]
return testdata
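# NOTE: each row is assumed to carry the test case id in testdata[0] and the
# expected value in testdata[4]; see the loop below.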
#### Load the test case files
test_to_try = []  # list of (input data, expected value) pairs
testcase_id = []  # list of test case names
for case_name in testcase_dict:
    # Read the test case file
testfiledata = read_testcasefile(testcase_dict[case_name])
    # Drop the header row
testfiledata.pop(0)
    # Loop over the test cases (rows)
for testdata in testfiledata:
filename = "./tests/airconditioning/ACtest_" + testdata[0] + ".json"
        # Build the input data
with open(filename, 'r', encoding='utf-8') as f:
inputdata = json.load(f)
        # Expected value
expectedvalue = (testdata[4])
        # Collect the test case
test_to_try.append( (inputdata, expectedvalue) )
        # Test case name
testcase_id.append(case_name + testdata[0])
# Run the tests
@pytest.mark.parametrize('inputdata, expectedvalue', test_to_try, ids=testcase_id)
def test_calc(inputdata, expectedvalue):
    # Dump the input for debugging/verification
with open("inputdata.json",'w', encoding='utf-8') as fw:
json.dump(inputdata, fw, indent=4, ensure_ascii=False)
    # Run the calculation
resultJson = airconditioning.calc_energy(inputdata)
diff_Eac = (abs(resultJson["E_airconditioning"] - expectedvalue)) / abs( expectedvalue )
    # Compare (tolerance: 0.01%)
assert diff_Eac < 0.0001
if __name__ == '__main__':
    print('--- test_airconditioning.py ---')

provider.go

package oauth
import (
"fmt"
"net"
"net/http"
"time"
"weave/pkg/config"
"weave/pkg/model"
"golang.org/x/oauth2"
)
const (
GithubAuthType = "github"
WeChatAuthType = "wechat"
EmptyAuthType = "nil"
)
var (
defaultHttpClient = &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 5 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
},
Timeout: 10 * time.Second,
}
)
func IsEmptyAuthType(authType string) bool {
return authType == "" || authType == EmptyAuthType
}
type UserInfo struct {
ID string
AuthType string
Username string
DisplayName string
Email string
AvatarUrl string
}
func (ui *UserInfo) User() *model.User {
return &model.User{
AuthId: ui.ID,
AuthType: ui.AuthType,
Name: ui.Username,
Email: ui.Email,
Avatar: ui.AvatarUrl,
}
}
type OAuthManager struct {
conf map[string]config.OAuthConfig
}
func NewOAuthManager(conf map[string]config.OAuthConfig) *OAuthManager {
return &OAuthManager{
conf: conf,
}
}
func (m *OAuthManager) GetAuthProvider(authType string) (AuthProvider, error) {
var provider AuthProvider
conf, ok := m.conf[authType]
if !ok {
return nil, fmt.Errorf("auth type %s not found in config", authType)
}
switch authType {
case GithubAuthType:
provider = NewGithubAuth(conf.ClientId, conf.ClientSecret)
case WeChatAuthType:
provider = NewWeChatAuth(conf.ClientId, conf.ClientSecret)
default:
return nil, fmt.Errorf("unknown auth type: %s", authType)
}
return provider, nil
}
type AuthProvider interface {
GetToken(code string) (*oauth2.Token, error)
GetUserInfo(token *oauth2.Token) (*UserInfo, error)
}

sandbox.rs

// Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Workaround: Clippy does not correctly handle borrow-checking rules for returned types.
#![cfg_attr(feature = "cargo-clippy", allow(let_and_return))]
use std::ops::{AddAssign, Deref};
use std::sync::{Arc, Mutex};
use std::cell::{Ref, RefCell, RefMut};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::collections::{BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque};
use std::iter::FromIterator;
use futures::{self, Async, Future, Stream};
use futures::Sink;
use futures::sync::mpsc;
use node::{Configuration, ExternalMessage, ListenerConfig, NodeHandler, NodeSender, ServiceConfig,
State, SystemStateProvider, ApiSender};
use blockchain::{Block, BlockProof, Blockchain, ConsensusConfig, GenesisConfig, Schema, Service,
SharedNodeState, StoredConfiguration, TimeoutAdjusterConfig, Transaction,
ValidatorKeys};
use storage::{MapProof, MemoryDB};
use messages::{Any, Connect, Message, RawMessage, RawTransaction, Status};
use crypto::{Hash, PublicKey, SecretKey, Seed, gen_keypair, gen_keypair_from_seed};
use helpers::{Height, Milliseconds, Round, ValidatorId, user_agent};
use events::{Event, InternalEvent, EventHandler, NetworkEvent, NetworkRequest, TimeoutRequest,
InternalRequest};
use events::network::NetworkConfiguration;
use super::timestamping::TimestampingService;
use super::config_updater::ConfigUpdateService;
use super::sandbox_tests_helper::VALIDATOR_0;
pub type SharedTime = Arc<Mutex<SystemTime>>;
const INITIAL_TIME_IN_SECS: u64 = 1_486_720_340;
#[derive(Debug)]
pub struct SandboxSystemStateProvider {
listen_address: SocketAddr,
shared_time: SharedTime,
}
impl SystemStateProvider for SandboxSystemStateProvider {
fn current_time(&self) -> SystemTime {
*self.shared_time.lock().unwrap()
}
fn listen_address(&self) -> SocketAddr {
self.listen_address
}
}
#[derive(Debug)]
pub struct SandboxInner {
pub time: SharedTime,
pub handler: NodeHandler,
pub sent: VecDeque<(SocketAddr, RawMessage)>,
pub events: VecDeque<Event>,
pub timers: BinaryHeap<TimeoutRequest>,
pub network_requests_rx: mpsc::Receiver<NetworkRequest>,
pub internal_requests_rx: mpsc::Receiver<InternalRequest>,
pub api_requests_rx: mpsc::Receiver<ExternalMessage>,
}
impl SandboxInner {
pub fn process_events(&mut self) {
self.process_internal_requests();
self.process_api_requests();
self.process_network_requests();
self.process_internal_requests();
}
pub fn handle_event<E: Into<Event>>(&mut self, e: E) {
self.handler.handle_event(e.into());
self.process_events();
}
fn process_network_requests(&mut self) {
let network_getter = futures::lazy(|| -> Result<(), ()> {
while let Async::Ready(Some(network)) = self.network_requests_rx.poll()? {
match network {
NetworkRequest::SendMessage(peer, msg) => self.sent.push_back((peer, msg)),
NetworkRequest::DisconnectWithPeer(_) |
NetworkRequest::Shutdown => {}
}
}
Ok(())
});
network_getter.wait().unwrap();
}
fn process_internal_requests(&mut self) {
let internal_getter = futures::lazy(|| -> Result<(), ()> {
while let Async::Ready(Some(internal)) = self.internal_requests_rx.poll()? {
match internal {
InternalRequest::Timeout(t) => self.timers.push(t),
InternalRequest::JumpToRound(height, round) => {
self.handler.handle_event(
InternalEvent::JumpToRound(height, round).into(),
)
}
InternalRequest::Shutdown => unimplemented!(),
}
}
Ok(())
});
internal_getter.wait().unwrap();
}
fn process_api_requests(&mut self) {
let api_getter = futures::lazy(|| -> Result<(), ()> {
while let Async::Ready(Some(api)) = self.api_requests_rx.poll()? {
self.handler.handle_event(api.into());
}
Ok(())
});
api_getter.wait().unwrap();
}
}
pub struct Sandbox {
pub validators_map: HashMap<PublicKey, SecretKey>,
pub services_map: HashMap<PublicKey, SecretKey>,
inner: RefCell<SandboxInner>,
addresses: Vec<SocketAddr>,
/// Connect message used during initialization.
connect: Option<Connect>,
}
impl Sandbox {
pub fn initialize(
&mut self,
connect_message_time: SystemTime,
start_index: usize,
end_index: usize,
) {
let connect = Connect::new(
&self.p(VALIDATOR_0),
self.a(VALIDATOR_0),
connect_message_time.into(),
&user_agent::get(),
self.s(VALIDATOR_0),
);
for validator in start_index..end_index {
let validator = ValidatorId(validator as u16);
self.recv(&Connect::new(
&self.p(validator),
self.a(validator),
self.time().into(),
&user_agent::get(),
self.s(validator),
));
self.send(self.a(validator), &connect);
}
self.check_unexpected_message();
self.connect = Some(connect);
}
fn check_unexpected_message(&self) {
if let Some((addr, msg)) = self.inner.borrow_mut().sent.pop_front() {
let any_msg = Any::from_raw(msg.clone()).expect("Send incorrect message");
panic!("Send unexpected message {:?} to {}", any_msg, addr);
}
}
pub fn p(&self, id: ValidatorId) -> PublicKey {
self.validators()[id.0 as usize]
}
pub fn s(&self, id: ValidatorId) -> &SecretKey {
let p = self.p(id);
&self.validators_map[&p]
}
pub fn a(&self, id: ValidatorId) -> SocketAddr {
let id: usize = id.into();
self.addresses[id]
}
pub fn validators(&self) -> Vec<PublicKey> {
self.cfg()
.validator_keys
.iter()
.map(|x| x.consensus_key)
.collect()
}
pub fn n_validators(&self) -> usize {
self.validators().len()
}
pub fn time(&self) -> SystemTime {
let inner = self.inner.borrow();
let time = *inner.time.lock().unwrap().deref();
time
}
pub fn set_time(&mut self, new_time: SystemTime) {
let mut inner = self.inner.borrow_mut();
*inner.time.lock().unwrap() = new_time;
}
pub fn node_handler_mut(&self) -> RefMut<NodeHandler> {
RefMut::map(self.inner.borrow_mut(), |inner| &mut inner.handler)
}
pub fn node_state(&self) -> Ref<State> {
Ref::map(self.inner.borrow(), |inner| inner.handler.state())
}
pub fn blockchain_ref(&self) -> Ref<Blockchain> {
Ref::map(self.inner.borrow(), |inner| &inner.handler.blockchain)
}
pub fn blockchain_mut(&self) -> RefMut<Blockchain> {
RefMut::map(
self.inner.borrow_mut(),
|inner| &mut inner.handler.blockchain,
)
}
/// Returns connect message used during initialization.
pub fn connect(&self) -> Option<&Connect> {
self.connect.as_ref()
}
pub fn recv<T: Message>(&self, msg: &T) {
self.check_unexpected_message();
// TODO Think about addresses.
let dummy_addr = SocketAddr::from(([127, 0, 0, 1], 12_039));
let event = NetworkEvent::MessageReceived(dummy_addr, msg.raw().clone());
self.inner.borrow_mut().handle_event(event);
}
pub fn process_events(&self) {
self.inner.borrow_mut().process_events();
}
pub fn send<T: Message>(&self, addr: SocketAddr, msg: &T) {
self.process_events();
let any_expected_msg = Any::from_raw(msg.raw().clone()).unwrap();
let send = self.inner.borrow_mut().sent.pop_front();
if let Some((real_addr, real_msg)) = send {
let any_real_msg = Any::from_raw(real_msg.clone()).expect("Send incorrect message");
if real_addr != addr || any_real_msg != any_expected_msg {
panic!(
"Expected to send the message {:?} to {} instead sending {:?} to {}",
any_expected_msg,
addr,
any_real_msg,
real_addr
)
}
} else {
panic!(
"Expected to send the message {:?} to {} but nothing happened",
any_expected_msg,
addr
);
}
}
pub fn broadcast<T: Message>(&self, msg: &T) {
self.broadcast_to_addrs(msg, self.addresses.iter().skip(1));
}
pub fn try_broadcast<T: Message>(&self, msg: &T) -> Result<(), String> {
self.try_broadcast_to_addrs(msg, self.addresses.iter().skip(1))
}
// TODO: add self-test for broadcasting?
pub fn broadcast_to_addrs<'a, T: Message, I>(&self, msg: &T, addresses: I)
where
I: IntoIterator<Item = &'a SocketAddr>,
{
self.try_broadcast_to_addrs(msg, addresses).unwrap();
}
// TODO: add self-test for broadcasting?
pub fn try_broadcast_to_addrs<'a, T: Message, I>(
&self,
msg: &T,
addresses: I,
) -> Result<(), String>
where
I: IntoIterator<Item = &'a SocketAddr>,
{
let any_expected_msg = Any::from_raw(msg.raw().clone()).unwrap();
        // If the node is excluded from the validators, it will still broadcast
        // messages, so in that case we should not skip addresses or the validator count.
let mut expected_set: HashSet<_> = HashSet::from_iter(addresses);
for _ in 0..expected_set.len() {
let send = self.inner.borrow_mut().sent.pop_front();
if let Some((real_addr, real_msg)) = send {
let any_real_msg = Any::from_raw(real_msg.clone()).expect("Send incorrect message");
if any_real_msg != any_expected_msg {
return Err(format!(
"Expected to broadcast the message {:?} instead sending {:?} to {}",
any_expected_msg,
any_real_msg,
real_addr
));
}
if !expected_set.contains(&real_addr) {
panic!(
"Double send the same message {:?} to {:?} during broadcasting",
any_expected_msg,
real_addr
)
} else {
expected_set.remove(&real_addr);
}
} else {
panic!(
"Expected to broadcast the message {:?} but someone don't receive \
messages: {:?}",
any_expected_msg,
expected_set
);
}
}
Ok(())
}
pub fn check_broadcast_status(&self, height: Height, block_hash: &Hash) {
self.broadcast(&Status::new(
&self.node_public_key(),
height,
block_hash,
&self.node_secret_key(),
));
}
pub fn add_time(&self, duration: Duration) {
self.check_unexpected_message();
let now = {
let inner = self.inner.borrow_mut();
let mut time = inner.time.lock().unwrap();
time.add_assign(duration);
*time.deref()
};
        // handle timeouts if any occur
loop {
let timeout = {
let timers = &mut self.inner.borrow_mut().timers;
if let Some(TimeoutRequest(time, timeout)) = timers.pop() {
if time > now {
timers.push(TimeoutRequest(time, timeout));
break;
} else {
timeout
}
} else {
break;
}
};
self.inner.borrow_mut().handle_event(timeout);
}
}
pub fn is_leader(&self) -> bool {
self.node_state().is_leader()
}
pub fn leader(&self, round: Round) -> ValidatorId {
self.node_state().leader(round)
}
pub fn last_block(&self) -> Block {
self.blockchain_ref().last_block()
}
pub fn last_hash(&self) -> Hash {
self.blockchain_ref().last_hash()
}
pub fn last_state_hash(&self) -> Hash {
*self.last_block().state_hash()
}
pub fn filter_present_transactions<'a, I>(&self, txs: I) -> Vec<RawMessage>
where
I: IntoIterator<Item = &'a RawMessage>,
{
let mut unique_set: HashSet<Hash> = HashSet::new();
let snapshot = self.blockchain_ref().snapshot();
let schema = Schema::new(&snapshot);
let schema_transactions = schema.transactions();
txs.into_iter()
.filter(|elem| {
let hash_elem = elem.hash();
if unique_set.contains(&hash_elem) {
return false;
}
unique_set.insert(hash_elem);
if schema_transactions.contains(&hash_elem) {
return false;
}
true
})
.cloned()
.collect()
}
/// Extracts state_hash from the fake block.
pub fn compute_state_hash<'a, I>(&self, txs: I) -> Hash
where
I: IntoIterator<Item = &'a RawTransaction>,
{
let height = self.current_height();
let mut blockchain = self.blockchain_mut();
let (hashes, recover, patch) = {
let mut hashes = Vec::new();
let mut recover = BTreeSet::new();
let mut fork = blockchain.fork();
{
let mut schema = Schema::new(&mut fork);
for raw in txs {
let hash = raw.hash();
hashes.push(hash);
if schema.transactions().get(&hash).is_none() {
recover.insert(hash);
schema.add_transaction_into_pool(raw.clone());
}
}
}
(hashes, recover, fork.into_patch())
};
blockchain.merge(patch).unwrap();
let fork = {
let mut fork = blockchain.fork();
let (_, patch) = blockchain.create_patch(ValidatorId(0), height, &hashes);
fork.merge(patch);
fork
};
let patch = {
let mut fork = blockchain.fork();
{
let mut schema = Schema::new(&mut fork);
for hash in recover {
schema.reject_transaction(&hash).unwrap();
}
}
fork.into_patch()
};
blockchain.merge(patch).unwrap();
*Schema::new(&fork).last_block().state_hash()
}
pub fn get_proof_to_service_table(
&self,
service_id: u16,
table_idx: usize,
) -> MapProof<Hash, Hash> {
let snapshot = self.blockchain_ref().snapshot();
let schema = Schema::new(&snapshot);
schema.get_proof_to_service_table(service_id, table_idx)
}
pub fn get_configs_merkle_root(&self) -> Hash {
let snapshot = self.blockchain_ref().snapshot();
let schema = Schema::new(&snapshot);
schema.configs().merkle_root()
}
pub fn cfg(&self) -> StoredConfiguration {
let snapshot = self.blockchain_ref().snapshot();
let schema = Schema::new(&snapshot);
schema.actual_configuration()
}
pub fn propose_timeout(&self) -> Milliseconds {
match self.cfg().consensus.timeout_adjuster {
TimeoutAdjusterConfig::Constant { timeout } => timeout,
_ => panic!("Unexpected timeout adjuster config type"),
}
}
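    /// Smallest vote count that forms a +2/3 majority: for 4 validators this
    /// returns 3, i.e. one faulty node is tolerated.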
pub fn majority_count(&self, num_validators: usize) -> usize {
num_validators * 2 / 3 + 1
}
pub fn round_timeout(&self) -> Milliseconds {
self.cfg().consensus.round_timeout
}
pub fn transactions_hashes(&self) -> Vec<Hash> {
let schema = Schema::new(self.blockchain_ref().snapshot());
let idx = schema.transactions_pool();
let vec = idx.iter().collect();
vec
}
pub fn current_round(&self) -> Round {
self.node_state().round()
}
pub fn block_and_precommits(&self, height: Height) -> Option<BlockProof> {
let snapshot = self.blockchain_ref().snapshot();
let schema = Schema::new(&snapshot);
schema.block_and_precommits(height)
}
pub fn current_height(&self) -> Height {
self.node_state().height()
}
pub fn current_leader(&self) -> ValidatorId {
self.node_state().leader(self.current_round())
}
pub fn assert_state(&self, expected_height: Height, expected_round: Round) {
let state = self.node_state();
let actual_height = state.height();
let actual_round = state.round();
assert_eq!(actual_height, expected_height);
assert_eq!(actual_round, expected_round);
}
pub fn assert_lock(&self, expected_round: Round, expected_hash: Option<Hash>) {
let state = self.node_state();
let actual_round = state.locked_round();
let actual_hash = state.locked_propose();
assert_eq!(actual_round, expected_round);
assert_eq!(actual_hash, expected_hash);
}
/// Creates new sandbox with "restarted" node.
pub fn restart(self) -> Self {
self.restart_with_time(UNIX_EPOCH + Duration::new(INITIAL_TIME_IN_SECS, 0))
}
/// Creates new sandbox with "restarted" node initialized by the given time.
pub fn restart_with_time(self, time: SystemTime) -> Self {
let connect = self.connect().map(|c| {
Connect::new(
c.pub_key(),
c.addr(),
time.into(),
c.user_agent(),
self.s(VALIDATOR_0),
)
});
let sandbox = self.restart_uninitialized_with_time(time);
if let Some(connect) = connect {
sandbox.broadcast(&connect);
}
sandbox
}
/// Constructs a new uninitialized instance of a `Sandbox` preserving database and
/// configuration.
pub fn restart_uninitialized(self) -> Sandbox {
self.restart_uninitialized_with_time(UNIX_EPOCH + Duration::new(INITIAL_TIME_IN_SECS, 0))
}
/// Constructs a new uninitialized instance of a `Sandbox` preserving database and
/// configuration.
pub fn restart_uninitialized_with_time(self, time: SystemTime) -> Sandbox {
let network_channel = mpsc::channel(100);
let internal_channel = mpsc::channel(100);
let api_channel = mpsc::channel(100);
let address = self.a(VALIDATOR_0);
let inner = self.inner.borrow();
let blockchain = inner.handler.blockchain.clone_with_api_sender(
ApiSender::new(
api_channel.0.clone(),
),
);
        let node_sender = NodeSender {
            network_requests: network_channel.0.clone().wait(),
            internal_requests: internal_channel.0.clone().wait(),
api_requests: api_channel.0.clone().wait(),
};
let config = Configuration {
listener: ListenerConfig {
address,
consensus_public_key: *inner.handler.state.consensus_public_key(),
consensus_secret_key: inner.handler.state.consensus_secret_key().clone(),
whitelist: Default::default(),
},
service: ServiceConfig {
service_public_key: *inner.handler.state.service_public_key(),
service_secret_key: inner.handler.state.service_secret_key().clone(),
},
network: NetworkConfiguration::default(),
peer_discovery: Vec::new(),
mempool: Default::default(),
};
let system_state = SandboxSystemStateProvider {
listen_address: address,
shared_time: SharedTime::new(Mutex::new(time)),
};
let mut handler = NodeHandler::new(
blockchain,
address,
node_sender,
Box::new(system_state),
config,
inner.handler.api_state.clone(),
);
handler.initialize();
let inner = SandboxInner {
sent: VecDeque::new(),
events: VecDeque::new(),
timers: BinaryHeap::new(),
internal_requests_rx: internal_channel.1,
network_requests_rx: network_channel.1,
api_requests_rx: api_channel.1,
handler,
time: Arc::clone(&inner.time),
};
let sandbox = Sandbox {
inner: RefCell::new(inner),
validators_map: self.validators_map.clone(),
services_map: self.services_map.clone(),
addresses: self.addresses.clone(),
connect: None,
};
sandbox.process_events();
sandbox
}
fn node_public_key(&self) -> PublicKey {
*self.node_state().consensus_public_key()
}
fn node_secret_key(&self) -> SecretKey {
self.node_state().consensus_secret_key().clone()
}
}
impl Drop for Sandbox {
fn drop(&mut self) {
if !::std::thread::panicking() {
self.check_unexpected_message();
}
}
}
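/// Maps a small index to a primitive socket address, e.g. `2` -> `2.2.2.2:2`.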
fn gen_primitive_socket_addr(idx: u8) -> SocketAddr {
let addr = Ipv4Addr::new(idx, idx, idx, idx);
SocketAddr::new(IpAddr::V4(addr), u16::from(idx))
}
/// Constructs an instance of a `Sandbox` and initializes connections.
pub fn sandbox_with_services(services: Vec<Box<Service>>) -> Sandbox {
let mut sandbox = sandbox_with_services_uninitialized(services);
let time = sandbox.time();
let validators_count = sandbox.validators_map.len();
sandbox.initialize(time, 1, validators_count);
sandbox
}
/// Constructs an uninitialized instance of a `Sandbox`.
pub fn sandbox_with_services_uninitialized(services: Vec<Box<Service>>) -> Sandbox {
let validators = vec![
gen_keypair_from_seed(&Seed::new([12; 32])),
gen_keypair_from_seed(&Seed::new([13; 32])),
gen_keypair_from_seed(&Seed::new([16; 32])),
gen_keypair_from_seed(&Seed::new([19; 32])),
];
let service_keys = vec![
gen_keypair_from_seed(&Seed::new([20; 32])),
gen_keypair_from_seed(&Seed::new([21; 32])),
gen_keypair_from_seed(&Seed::new([22; 32])),
gen_keypair_from_seed(&Seed::new([23; 32])),
];
let addresses: Vec<SocketAddr> = (1..5).map(gen_primitive_socket_addr).collect::<Vec<_>>();
let api_channel = mpsc::channel(100);
let db = MemoryDB::new();
let mut blockchain = Blockchain::new(
db,
services,
service_keys[0].0,
service_keys[0].1.clone(),
ApiSender::new(api_channel.0.clone()),
);
let consensus = ConsensusConfig {
round_timeout: 1000,
status_timeout: 600_000,
peers_timeout: 600_000,
txs_block_limit: 1000,
max_message_len: 1024 * 1024,
timeout_adjuster: TimeoutAdjusterConfig::Constant { timeout: 200 },
};
let genesis = GenesisConfig::new_with_consensus(
consensus,
validators.iter().zip(service_keys.iter()).map(|x| {
ValidatorKeys {
consensus_key: (x.0).0,
service_key: (x.1).0,
}
}),
);
blockchain.initialize(genesis).unwrap();
let config = Configuration {
listener: ListenerConfig {
address: addresses[0],
consensus_public_key: validators[0].0,
consensus_secret_key: validators[0].1.clone(),
whitelist: Default::default(),
},
service: ServiceConfig {
service_public_key: service_keys[0].0,
service_secret_key: service_keys[0].1.clone(),
},
network: NetworkConfiguration::default(),
peer_discovery: Vec::new(),
mempool: Default::default(),
};
// TODO use factory or other solution like set_handler or run
let system_state = SandboxSystemStateProvider {
listen_address: addresses[0],
shared_time: SharedTime::new(Mutex::new(
UNIX_EPOCH + Duration::new(INITIAL_TIME_IN_SECS, 0),
)),
};
let shared_time = Arc::clone(&system_state.shared_time);
let network_channel = mpsc::channel(100);
let internal_channel = mpsc::channel(100);
let node_sender = NodeSender {
network_requests: network_channel.0.clone().wait(),
internal_requests: internal_channel.0.clone().wait(),
api_requests: api_channel.0.clone().wait(),
};
let mut handler = NodeHandler::new(
blockchain.clone(),
addresses[0],
node_sender,
Box::new(system_state),
config.clone(),
SharedNodeState::new(5000),
);
handler.initialize();
let inner = SandboxInner {
sent: VecDeque::new(),
events: VecDeque::new(),
timers: BinaryHeap::new(),
network_requests_rx: network_channel.1,
api_requests_rx: api_channel.1,
internal_requests_rx: internal_channel.1,
handler,
time: shared_time,
};
let sandbox = Sandbox {
inner: RefCell::new(inner),
validators_map: HashMap::from_iter(validators.clone()),
services_map: HashMap::from_iter(service_keys),
addresses,
connect: None,
};
// General assumption; necessary for correct work of consensus algorithm
assert!(sandbox.propose_timeout() < sandbox.round_timeout());
sandbox.process_events();
sandbox
}
pub fn timestamping_sandbox() -> Sandbox {
sandbox_with_services(vec![
Box::new(TimestampingService::new()),
Box::new(ConfigUpdateService::new()),
])
}
#[cfg(test)]
mod tests {
use super::*;
use blockchain::{ServiceContext, ExecutionResult, TransactionSet};
use messages::RawTransaction;
use encoding;
use crypto::{gen_keypair_from_seed, Seed};
use storage::{Fork, Snapshot};
use sandbox::sandbox_tests_helper::{add_one_height, SandboxState, VALIDATOR_1, VALIDATOR_2,
VALIDATOR_3, HEIGHT_ONE, ROUND_ONE, ROUND_TWO};
const SERVICE_ID: u16 = 1;
transactions! {
HandleCommitTransactions {
const SERVICE_ID = SERVICE_ID;
struct TxAfterCommit {
height: Height,
}
}
}
impl TxAfterCommit {
pub fn new_with_height(height: Height) -> TxAfterCommit {
let keypair = gen_keypair_from_seed(&Seed::new([22; 32]));
TxAfterCommit::new(height, &keypair.1)
}
}
impl Transaction for TxAfterCommit {
fn verify(&self) -> bool {
true
}
fn execute(&self, _: &mut Fork) -> ExecutionResult {
Ok(())
}
}
struct HandleCommitService;
impl Service for HandleCommitService {
fn service_name(&self) -> &str {
"handle_commit"
}
fn service_id(&self) -> u16 {
SERVICE_ID
}
fn state_hash(&self, _: &Snapshot) -> Vec<Hash> {
Vec::new()
}
fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<Transaction>, encoding::Error> {
let tx = HandleCommitTransactions::tx_from_raw(raw)?;
Ok(tx.into())
}
fn handle_commit(&self, context: &ServiceContext) {
let tx = TxAfterCommit::new_with_height(context.height());
context.transaction_sender().send(Box::new(tx)).unwrap();
}
}
#[test]
fn test_sandbox_init() {
timestamping_sandbox();
}
#[test]
fn test_sandbox_recv_and_send() {
let s = timestamping_sandbox();
let (public, secret) = gen_keypair();
s.recv(&Connect::new(
&public,
s.a(VALIDATOR_2),
s.time().into(),
&user_agent::get(),
&secret,
));
s.send(
s.a(VALIDATOR_2),
&Connect::new(
&s.p(VALIDATOR_0),
s.a(VALIDATOR_0),
s.time().into(),
&user_agent::get(),
s.s(VALIDATOR_0),
),
);
}
#[test]
fn test_sandbox_assert_status() {
// TODO: remove this?
let s = timestamping_sandbox();
s.assert_state(HEIGHT_ONE, ROUND_ONE);
s.add_time(Duration::from_millis(999));
s.assert_state(HEIGHT_ONE, ROUND_ONE);
s.add_time(Duration::from_millis(1));
s.assert_state(HEIGHT_ONE, ROUND_TWO);
}
#[test]
#[should_panic(expected = "Expected to send the message")]
fn test_sandbox_expected_to_send_but_nothing_happened() {
let s = timestamping_sandbox();
s.send(
s.a(VALIDATOR_1),
&Connect::new(
&s.p(VALIDATOR_0),
s.a(VALIDATOR_0),
s.time().into(),
&user_agent::get(),
s.s(VALIDATOR_0),
),
);
}
#[test]
#[should_panic(expected = "Expected to send the message")]
fn test_sandbox_expected_to_send_another_message() {
let s = timestamping_sandbox();
let (public, secret) = gen_keypair();
s.recv(&Connect::new(
&public,
s.a(VALIDATOR_2),
s.time().into(),
&user_agent::get(),
&secret,
));
s.send(
s.a(VALIDATOR_1),
&Connect::new(
&s.p(VALIDATOR_0),
s.a(VALIDATOR_0),
s.time().into(),
&user_agent::get(),
s.s(VALIDATOR_0),
),
);
}
#[test]
#[should_panic(expected = "Send unexpected message")]
fn test_sandbox_unexpected_message_when_drop() {
let s = timestamping_sandbox();
let (public, secret) = gen_keypair();
s.recv(&Connect::new(
&public,
s.a(VALIDATOR_2),
s.time().into(),
&user_agent::get(),
&secret,
));
}
#[test]
#[should_panic(expected = "Send unexpected message")]
fn test_sandbox_unexpected_message_when_handle_another_message() {
let s = timestamping_sandbox();
let (public, secret) = gen_keypair();
s.recv(&Connect::new(
&public,
s.a(VALIDATOR_2),
s.time().into(),
&user_agent::get(),
&secret,
));
s.recv(&Connect::new(
&public,
s.a(VALIDATOR_3),
s.time().into(),
&user_agent::get(),
&secret,
));
panic!("Oops! We don't catch unexpected message");
}
#[test]
#[should_panic(expected = "Send unexpected message")]
fn test_sandbox_unexpected_message_when_time_changed() {
let s = timestamping_sandbox();
let (public, secret) = gen_keypair();
s.recv(&Connect::new(
&public,
s.a(VALIDATOR_2),
s.time().into(),
&user_agent::get(),
&secret,
));
s.add_time(Duration::from_millis(1000));
panic!("Oops! We don't catch unexpected message");
}
#[test]
fn test_sandbox_service_handle_commit() {
let sandbox = sandbox_with_services(vec![
Box::new(HandleCommitService),
Box::new(TimestampingService::new()),
]);
let state = SandboxState::new();
add_one_height(&sandbox, &state);
let tx = TxAfterCommit::new_with_height(Height(1));
sandbox.broadcast(&tx);
    }
}

user.service.js

import httpClient from "./http-client";
export default {
fbSignin: accessToken => {
return httpClient.post(`fb-signin`, { accessToken: accessToken });
},
getLoggedInUserProfile: () => {
return httpClient.get("me");
},
getUserProfile: username => {
return httpClient.get("user/" + username);
},
updateUserProfile: profile => {
    return httpClient.put("me", profile);
  },
signout: () => {
localStorage.removeItem("authToken");
},
getActivities: () => {
return httpClient.get("useractivity");
},
udpateUserPreferences: userPreferences => {
return httpClient.put("user/preferences", userPreferences);
},
getReferrals: () => {
return httpClient.get("user/get/referrals");
},
getAllUsers(searchText) {
return httpClient.get(`users?searchText=${searchText}`);
},
getUserByUserId(userId) {
return httpClient.get(`users?userId=${userId}`);
}
}; | |
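// Example (sketch, assuming an axios-style httpClient):
//   import userService from "./user.service";
//   userService.getUserProfile("alice").then(res => console.log(res.data));
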
test-comp40.component.ts

import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-test-comp40',
template: `
<div>
<p>Hello {{ title }}</p>
</div>
`,
styles: []
})
export class TestComp40Component implements OnInit {
title: string = 'world';
ngOnInit() {
this.title = "Luxembourg";
  }
}
|
parse_test.go

// Package structql implements the Database structure.
// This file contains tests for parse.go.
package structql
import (
"fmt"
"reflect"
"testing"
)
// TestParseResponse tests the parseResponse() method.
func TestParseResponse(t *testing.T) {
creds := GetTestCreds()
type Person struct {
Name string `sql:"name"`
Age int32 `sql:"age"`
Mass float32 `sql:"mass"`
}
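	// The `sql` struct tags are assumed to be what parseResponse uses to map
	// result columns onto struct fields.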
adam := Person{"Adam", 10, 242.0}
brad := Person{"Brad", 20, 199.9}
chad := Person{"Chad", 30, 206.9}
tests := []struct {
query string
wantPeople []Person
}{
{
`SELECT * FROM People WHERE name = 'Duke'`,
[]Person{},
}, {
`SELECT * FROM People WHERE name = 'Adam'`,
[]Person{adam},
}, {
`SELECT * FROM People WHERE age >= 20`,
[]Person{brad, chad},
},
}
// Create a suitable table in the test database.
conn, err := Connect(creds)
if err != nil {
t.Fatalf("Failed to connect to database: %v.", err)
}
if _, err := conn.exec(`CREATE TABLE People (name TEXT, age INT, mass FLOAT4);`); err != nil {
t.Fatalf("Failed to create table: %v.", err)
}
defer func() {
conn.exec(`DROP TABLE People;`)
conn.Close()
}()
// Add Adam, Brad, and Chad to the database.
for _, person := range []Person{adam, brad, chad} {
cmd := fmt.Sprintf("INSERT INTO People (name, age, mass) VALUES ('%s', %d, %f);", person.Name, person.Age, person.Mass)
if _, err := conn.exec(cmd); err != nil {
t.Fatalf("Failed to insert Person %q: %v.", person.Name, err)
}
}
for i, test := range tests {
rows, err := conn.query(test.query)
if err != nil {
t.Errorf("TestParseResponse()[%d] - failed to execute query: %v.", i, err)
continue
}
			havePeople, err := parseResponse(rows, Person{})
			if err != nil {
				t.Errorf("TestParseResponse()[%d] - failed to parse response: %v.", i, err)
				continue
			}
if len(havePeople) != len(test.wantPeople) {
t.Errorf("TestParseResponse()[%d] = %d, want %d people.", i, len(havePeople), len(test.wantPeople))
continue
}
for j, havePerson := range havePeople {
wantPerson := test.wantPeople[j]
if !reflect.DeepEqual(havePerson, wantPerson) {
t.Errorf("TestParseResponse()[%d][%d] = %v, want Person %v.", i, j, havePerson, wantPerson)
}
}
}
}

test_iostream.py

from pybind11_tests import iostream as m
import sys
from contextlib import contextmanager
try:
# Python 3
from io import StringIO
except ImportError:
# Python 2
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# Python 3.4
from contextlib import redirect_stdout
except ImportError:
@contextmanager
def redirect_stdout(target):
original = sys.stdout
sys.stdout = target
yield
sys.stdout = original
try:
# Python 3.5
from contextlib import redirect_stderr
except ImportError:
@contextmanager
def redirect_stderr(target):
original = sys.stderr
sys.stderr = target
yield
sys.stderr = original
def test_captured(capsys):
    msg = "I've been redirected to Python, I hope!"
    m.captured_output(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ''

    m.captured_output_default(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == msg
    assert stderr == ''

    m.captured_err(msg)
    stdout, stderr = capsys.readouterr()
    assert stdout == ''
    assert stderr == msg

def test_captured_large_string(capsys):
# Make this bigger than the buffer used on the C++ side: 1024 chars
msg = "I've been redirected to Python, I hope!"
msg = msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
def test_guard_capture(capsys):
msg = "I've been redirected to Python, I hope!"
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
def test_series_captured(capture):
with capture:
m.captured_output("a")
m.captured_output("b")
assert capture == "ab"
def test_flush(capfd):
msg = "(not flushed)"
msg2 = "(flushed)"
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == ''
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
assert stdout == msg + msg2
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == msg
def test_not_captured(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ''
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
def test_err(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == msg
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
def test_multi_captured(capfd):
stream = StringIO()
with redirect_stdout(stream):
m.captured_output("a")
m.raw_output("b")
m.captured_output("c")
m.raw_output("d")
stdout, stderr = capfd.readouterr()
assert stdout == 'bd'
assert stream.getvalue() == 'ac'
def test_dual(capsys):
m.captured_dual("a", "b")
stdout, stderr = capsys.readouterr()
assert stdout == "a"
assert stderr == "b"
def test_redirect(capfd):
msg = "Should not be in log!"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stream.getvalue() == msg
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ''
def test_redirect_err(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
with redirect_stderr(stream):
with m.ostream_redirect(stdout=False):
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ''
assert stream.getvalue() == msg2
def test_redirect_both(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
stream2 = StringIO()
with redirect_stdout(stream):
with redirect_stderr(stream2):
with m.ostream_redirect():
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
    assert stream2.getvalue() == msg2
extras_webhooks_create_parameters.go

// Code generated by go-swagger; DO NOT EDIT.
// Copyright 2020 The go-netbox Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package extras
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/tomasherout/go-netbox/netbox/models"
)
// NewExtrasWebhooksCreateParams creates a new ExtrasWebhooksCreateParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewExtrasWebhooksCreateParams() *ExtrasWebhooksCreateParams {
return &ExtrasWebhooksCreateParams{
timeout: cr.DefaultTimeout,
}
}
// NewExtrasWebhooksCreateParamsWithTimeout creates a new ExtrasWebhooksCreateParams object
// with the ability to set a timeout on a request.
func NewExtrasWebhooksCreateParamsWithTimeout(timeout time.Duration) *ExtrasWebhooksCreateParams {
return &ExtrasWebhooksCreateParams{
timeout: timeout,
}
}
// NewExtrasWebhooksCreateParamsWithContext creates a new ExtrasWebhooksCreateParams object
// with the ability to set a context for a request.
func NewExtrasWebhooksCreateParamsWithContext(ctx context.Context) *ExtrasWebhooksCreateParams {
return &ExtrasWebhooksCreateParams{
Context: ctx,
}
}
// NewExtrasWebhooksCreateParamsWithHTTPClient creates a new ExtrasWebhooksCreateParams object
// with the ability to set a custom HTTPClient for a request.
func NewExtrasWebhooksCreateParamsWithHTTPClient(client *http.Client) *ExtrasWebhooksCreateParams {
return &ExtrasWebhooksCreateParams{
HTTPClient: client,
}
}
/* ExtrasWebhooksCreateParams contains all the parameters to send to the API endpoint
for the extras webhooks create operation.
Typically these are written to a http.Request.
*/
type ExtrasWebhooksCreateParams struct {
// Data.
Data *models.Webhook
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the extras webhooks create params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *ExtrasWebhooksCreateParams) WithDefaults() *ExtrasWebhooksCreateParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the extras webhooks create params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *ExtrasWebhooksCreateParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) WithTimeout(timeout time.Duration) *ExtrasWebhooksCreateParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) WithContext(ctx context.Context) *ExtrasWebhooksCreateParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) WithHTTPClient(client *http.Client) *ExtrasWebhooksCreateParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithData adds the data to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) WithData(data *models.Webhook) *ExtrasWebhooksCreateParams {
o.SetData(data)
return o
}
// SetData adds the data to the extras webhooks create params
func (o *ExtrasWebhooksCreateParams) SetData(data *models.Webhook) {
o.Data = data
}
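// Example (sketch; the generated client method name is an assumption):
//
//	params := NewExtrasWebhooksCreateParams().
//		WithTimeout(30 * time.Second).
//		WithData(&models.Webhook{})
//	// resp, err := client.Extras.ExtrasWebhooksCreate(params, nil)
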
// WriteToRequest writes these params to a swagger request
func (o *ExtrasWebhooksCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Data != nil {
if err := r.SetBodyParam(o.Data); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}

rule_based_decision_making.py

# This FILE is part of multi-legged robot field exploration model
# env_wrapper.py - to obtain user interaction data from website
#
# This program is explained by roboLAND at the University of Southern California.
# Please credit the source if you use it.
#
# Copyright(c) 2021-2025 Ryoma Liu
# Email: [email protected]
from env_wrapper import *
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import optimize
import random
import matplotlib.pylab as pylab
import numpy as np
from PIL import Image
from math import *
class rule_state_machine:
def __init__(self):
        '''Initialize env info and parameters for decision making.
'''
self.states = ['Initial', 'Exploration', 'Verification']
self.current_state = 0
self.env = ENV()
self.hypo_locations = (['No','Feature_low','Feature_middle',
'Feature_high'])
self.hypo_location = 0
self.hypo_samples = (['No','Feature_low', 'Feature_middle',
'Feature_high'])
self.hypo_sample = 0
self.information_matrix = []
self.accuracy_matrix = []
self.fitting_error_matrix = []
    def set_init_hypo(self, hypo_location, hypo_sample):
self.hypo_location = hypo_location
self.hypo_sample = hypo_sample
def choose_initial_template(self):
'''choose initial template
According to the initial knowledge and hypothesis, human will select a
experience data sample distribution
Args:
self.hypo_location: inital hypo about data location feature
self.hypo_sample : initial hypo about data sample feature
Returns:
change the initial template in env wrapper
'''
if(self.hypo_location == 0):
location_index = [1,9,13,21]
elif(self.hypo_location == 1):
location_index = [1,4,7,11,16,21]
elif(self.hypo_location == 2):
location_index = [1,5,9,12,15,21]
elif(self.hypo_location == 3):
location_index = [1,6,11,14,17,20]
if(self.hypo_sample == 0):
sample_index = [3,3,3,3]
elif(self.hypo_sample == 1):
sample_index = [5,5,3,3,3,3]
elif(self.hypo_sample == 2):
sample_index = [3,3,5,5,3,3]
elif(self.hypo_sample == 3):
sample_index = [3,3,3,3,5,5]
initial_action = [location_index, sample_index]
self.env.initiate_template(initial_action)
def handle_information_coverage(self):
sample_state = self.env.get_state()
sample_loc = np.array(sample_state[0])
sample_number = np.array(sample_state[1])
sort_index = np.argsort(sample_loc)
sample_loc = sample_loc[sort_index]
sample_number = sample_number[sort_index]
unique_index = np.unique(sample_loc, return_index = True)
sample_loc = sample_loc[unique_index[1]]
sample_number = sample_number[unique_index[1]]
sample_state = [sample_loc, sample_number]
print(sample_state)
self.information_matrix = np.zeros(22) #information matrix in location
self.variable_coverage = np.zeros(20)
for i in range(len(sample_state[0])):
scale = 0.1 * sample_state[1][i] + 1
locs = sample_state[0][i] + 1
self.information_matrix += gauss(locs, scale)
# print(self.information_matrix)
# print(gauss(locs, scale))
# self.plot_line('cool green', np.linspace(1,22,22), gauss(locs, scale), 'test'+str(i))
# print("coverage_matrix: ", self.information_matrix)
mm, erodi = self.env.get_data_state()
mm_mean = np.mean(mm, axis=0)
mm_nonzero = mm[np.nonzero(mm)]
mm_mean_nonzero = mm_mean[np.nonzero(mm_mean)]
        start = 0                # left endpoint of the first interval
        number_of_interval = 20  # number of intervals
        length = 1               # interval length
        intervals = {'{}~{}'.format(length*x+start, length*(x+1)+start): 0 for x in range(number_of_interval)}  # build the intervals
result = np.array(interval_statistics(mm_nonzero, intervals))
self.variable_coverage = len(result[(np.nonzero(result))])/len(result)
result_number = np.linspace(0, 19, 20)
variable_information = np.zeros(20)
for i in range(len(result_number)):
single_converage = gauss_variable(result_number[i] +0.5, result[i])
variable_information += single_converage
# feed the variable coverage into the previous belief
self.variable_information = variable_information
# print(mm_mean_nonzero)
# print(sample_state[0])
# p , e = optimize.curve_fit(piecewise_linear_moisture, np.array(sample_state[0])+1, mm_mean_nonzero)
# xloc = np.linspace(1, 22, 22)
# xmoisture = piecewise_linear_moisture(xloc, *p)
# self.mapping_value = []
# for emoisture in xmoisture:
# self.mapping_value.append(variable_information[int(emoisture)])
# print(variable_information)
# print(self.mapping_value)
# plt.plot(xloc,xmoisture )
# plt.show()
def handle_information_accuracy(self):
accuracy_matrix = []
mm, data_state = self.env.get_data_state()
loc_state = self.env.get_state()
# error_cost = np.std(data_state, axis=0)
for col in range(data_state.shape[1]):
if col in loc_state[0]:
effective_data = data_state[:,col][np.nonzero(data_state[:,col])]
# print(effective_data)
median = np.median(effective_data)
k1 = 1.4826
mad = k1 * np.median(np.abs(effective_data-median))
lower_limit = median - (3*mad)
upper_limit = median + (3*mad)
                # an outlier lies outside [lower_limit, upper_limit], hence the OR
                outlier_data_num = (len(effective_data[(effective_data >
                                    upper_limit) | (effective_data < lower_limit)]))
data_samples = len(effective_data)
                if data_samples == 0:
                    total_cost = 0
                else:
                    total_cost = 1 - 1/(1 + (data_samples - 0.99)/(3*outlier_data_num + 1))
                # append once per sampled location so the matrix stays aligned
                accuracy_matrix.append(total_cost)
else:
accuracy_matrix.append(0)
self.accuracy_matrix = accuracy_matrix
# print('accuracy_matrix: ', self.accuracy_matrix)
def handle_feature_point_detection(self):
loc_state = self.env.get_state()[0]
#print(self.env.get_state())
self.fitting_error_matrix = np.zeros(22)
mm, erodi = self.env.get_data_state()
mm_mean = np.mean(mm, axis=0)
mm_nonzeroindex = (mm_mean != 0)
erodi_mean = np.mean(erodi, axis=0)
self.loc_index = np.linspace(1,22,22)[mm_nonzeroindex]
data_index = mm_mean[mm_nonzeroindex]
data_mean = erodi_mean[mm_nonzeroindex]
p , e = optimize.curve_fit(piecewise_linear, data_index, data_mean)
# print('dfadfaaf', p)
xd = np.linspace(0, np.max(data_index), 22)
fit_curve = piecewise_linear(xd, *p)
fitting_results = piecewise_linear(data_index, *p)
self.fitting_results = fitting_results
fitting_error = fitting_results - data_mean
mm_mean[mm_nonzeroindex] = fitting_error
self.data_index = data_index
self.fitting_error_matrix[mm_nonzeroindex] = fitting_error
# print(data_mean)
nonzero_data_mean = data_mean[np.nonzero(data_mean != 0)]
rmse_data = (sqrt(np.sum(np.power(nonzero_data_mean, 2))/
np.size(nonzero_data_mean)))
# print(rmse_data)
self.rmse_data = rmse_data
# plt.plot(xd, fit_curve)
# plt.plot(data_index, data_mean, "o")
# plt.plot(data_index, fitting_results, "*")
# #plt.plot(data_index, fitting_error, "+")
# plt.show()
# plt.savefig('123.png')
# find the feature point location
array = np.asarray(data_index)
idx = (np.abs(array - p[0])).argmin()
loc_indx = loc_state[idx]
saturation_estimated = int(loc_indx * (p[0]/array[idx]))
self.saturation_selection = np.arange(saturation_estimated - 2, saturation_estimated + 3, 1)
def confidence_model(self):
non_zero_matrix = (self.fitting_error_matrix[np.nonzero
(self.fitting_error_matrix != 0)])
rmse = (sqrt(np.sum(np.power(non_zero_matrix, 2))/
np.size(non_zero_matrix)))
# print(rmse)
# print(self.fitting_error_matrix)
# print(non_zero_matrix)
whole_rmse_percentage = rmse/self.rmse_data
# print(whole_rmse_percentage)
confindence = (0.04 - whole_rmse_percentage) * 30 * self.coverage_criteria
# print(confindence)
def handle_state_judge(self):
if(self.current_state == 0):
self.current_state = 1
elif(self.current_state == 1):
if(np.min(self.accuracy_matrix) > 0.7 and
len(self.information_matrix[self.information_matrix > 0.8]) > 15):
self.current_state = 2
else:
self.current_state = 1
elif(self.current_state == 2):
if(len(self.fitting_error_matrix[self.fitting_error_matrix > 0.8]) > 0):
self.current_state = 1
            else:
                self.current_state = 2
def information_model(self):
self.coverage_criteria = (len(self.information_matrix[self.information_matrix
> 0.3]) / 22)
accuracy_matrix = np.array(self.accuracy_matrix)
# print(accuracy_matrix)
self.accuracy_criteria = (len(accuracy_matrix[(accuracy_matrix > 0.6) & (accuracy_matrix != 0)]) /
len(accuracy_matrix[accuracy_matrix != 0]))
# print('accuracy_value:', self.accuracy_criteria) # percentage of locs which the accuracy is lower than 0.6
# print('coverage_value:', self.coverage_criteria) # percentage of locs which the information is lower than 0.8
def take_action(self):
if(self.current_state == 0):
self.choose_initial_template()
elif(self.current_state == 1):
action_loc = np.argmin(self.information_matrix)
self.env.set_action([action_loc],[3])
accuracy_loc = np.where(self.accuracy_matrix < 0.7)
accuracy_samples = np.ones(len(accuracy_loc))
self.env.set_action(accuracy_loc,accuracy_samples)
elif(self.current_state == 2):
fitting_error_loc = np.where(self.fitting_error_matrix > 0.8)
add_loc = []
add_samples = []
current_state = self.env.get_state()
for i in fitting_error_loc:
if not i+1 in current_state[0]:
add_loc.append(i+1)
add_samples.append(3)
if not i-1 in current_state[0]:
add_loc.append(i-1)
add_samples.append(3)
self.env.set_action(add_loc, add_samples)
def plot(self, color, name):
myparams = {
'axes.labelsize': '10',
'xtick.labelsize': '10',
'ytick.labelsize': '10',
'lines.linewidth': 1,
'legend.fontsize': '3',
'font.family': 'Times New Roman',
            'figure.figsize': '9, 5'  # figure size
}
        pylab.rcParams.update(myparams)  # apply the custom settings
        # line_styles=['ro-','b^-','gs-','ro--','b^--','gs--']  # line styles
fig1 = plt.figure(1)
a = plt.plot(self.coverage_criteria, self.accuracy_criteria ,marker='o', color=sns.xkcd_rgb[color],
markersize=5)
        plt.legend(loc="lower right")  # place the legend in the lower-right corner
plt.ylabel('accuracy')
plt.xlabel('coverage ')
plt.xlim((0, 1.1))
plt.ylim((0, 1.1))
plt.axvline(x=1, c="b", ls="--", lw=1)
plt.axhline(y=1, c="b", ls="--", lw=1)
plt.savefig(name)
        # Note: plt.show() opens a fresh blank figure, so a later savefig can produce an all-white image; save the figure before calling show().
# plt.show()
def interval_statistics(data, intervals):
if len(data) == 0:
return
for num in data:
for interval in intervals:
lr = tuple(interval.split('~'))
left, right = float(lr[0]), float(lr[1])
if left <= num <= right:
intervals[interval] += 1
results = []
for key, value in intervals.items():
#print("%10s" % key, end='') # 借助 end=''可以不换行
# print("%10s" % value, end='') # "%10s" 右对齐
#print('%16s' % '{:.3%}'.format(value * 1.0 / len(data)))
results.append(value)
return results
def piecewise_linear(x, x0, y0, k1):
# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0
# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0
return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0,
lambda x: y0])
def piecewise_linear_moisture(x, x0, y0, k1, k2):
# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0
# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0
return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0,
lambda x: k2*x + y0 - k2*x0])
def gauss(mean, scale, x=np.linspace(1,22,22), sigma=1):
return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))
def gauss_variable(mean, scale, x=np.linspace(0,19,20), sigma=1):
return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))
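# Minimal usage sketch for the piecewise model above (illustration only; the
# helper name _demo_piecewise_fit is hypothetical, and scipy is assumed to be
# available, as the fitting code in this file already relies on it):
def _demo_piecewise_fit():
    from scipy.optimize import curve_fit
    x = np.linspace(0, 10, 50)
    # synthetic saturation data: breakpoint x0=4.0, plateau y0=2.0, slope k1=0.5
    y = piecewise_linear(x, 4.0, 2.0, 0.5) + np.random.normal(0, 0.05, x.size)
    p, _ = curve_fit(piecewise_linear, x, y, p0=[5.0, 1.0, 1.0])
    return p  # estimated (x0, y0, k1)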
if __name__ == "__main__":
DM = rule_state_machine()
DM.choose_initial_template()
# x = np.linspace(1,22,22)
# information_matrix = gauss(1,0.1).reshape(22,1)
# print(information_matrix)
# sns.set()
# ax = sns.heatmap(information_matrix, vmin=0, vmax=1)
# plt.title('Information Matrix')
# plt.savefig("test.png")
DM.handle_information_accuracy()
DM.handle_information_coverage()
DM.information_model()
DM.plot('cool green','test')
DM.handle_feature_point_detection()
DM.confidence_model()
| set_init_hypo |
webpack.plugin-renderer.config.ts | import * as fs from "fs";
import * as path from "path";
import { DefinePlugin, Configuration, ProvidePlugin, SourceMapDevToolPlugin } from "webpack";
import { APPLICATION_ROOT, PORTAL_ROOT } from "../../globals/build_constants";
import {
BASE_PROJECT_FAVICON_PATH,
BASE_PROJECT_STATIC_FILES,
BASE_PROJECT_TEMPLATE_PATH,
DOTENV_CONFIG_PATH,
ENVIRONMENT,
IS_PRODUCTION,
MODULES_CONFIG,
PROJECT_CORE_DEPENDENCIES,
PROJECT_ROOT_PATH,
ANALYZE_ENABLED,
REPORT_BUNDLE_ANALYZER_PATH,
REPORT_BUNDLE_STATS_PATH,
RUNTIME_CONSTANTS,
TS_CONFIG_PATH,
RENDERER_MODULES_ROOT_PATH,
BACKEND_PUBLIC_PATH,
ESLINT_CONFIG_PATH,
ESLINT_IGNORE_PATH, DEV_SERVER_REFRESH
} from "../webpack.constants";
import { IModuleDefinition } from "../webpack.types";
// CJS way to import most plugins.
const ReactRefreshWebpackPlugin = require("@pmmmwh/react-refresh-webpack-plugin");
const CopyWebpackPlugin = require("copy-webpack-plugin");
const DotEnv = require("dotenv-webpack");
const DuplicatePackageCheckerPlugin = require("duplicate-package-checker-webpack-plugin");
const ForkTsCheckerWebpackPlugin = require("fork-ts-checker-webpack-plugin");
const HtmlWebpackPlugin = require("html-webpack-plugin");
const TerserPlugin = require("terser-webpack-plugin");
const BundleAnalyzerPlugin = require("webpack-bundle-analyzer").BundleAnalyzerPlugin;
const StatsWriterPlugin = require("webpack-stats-plugin").StatsWriterPlugin;
/**
* Create separate chunks/dependencies for each module group with shared core/base.
*/
function createChunkCacheGroups(definitions: Array<IModuleDefinition>): Record<string, any> {
const entries: Record<string, any> = {};
for (const it of definitions) {
entries[`modules/${it.name}/l`] = ({
maxSize: 750 * 1000,
priority: 60,
reuseExistingChunk: true,
test: new RegExp(`/modules/${it.name}/node_modules/`)
});
entries[`modules/${it.name}/s`] = ({
maxSize: 250 * 1000,
priority: 30,
reuseExistingChunk: true,
test: new RegExp(`/modules/${it.name}/`)
});
}
return entries;
}
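/*
 * Illustrative sketch (module name "home" is assumed, not taken from the
 * config): for one module definition the function above produces entries like
 *
 *   "modules/home/l": { maxSize: 750000, priority: 60, reuseExistingChunk: true,
 *                       test: /\/modules\/home\/node_modules\// },
 *   "modules/home/s": { maxSize: 250000, priority: 30, reuseExistingChunk: true,
 *                       test: /\/modules\/home\// }
 *
 * so module-local node_modules land in larger, higher-priority chunks than the
 * module's own source files.
 */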
/**
* For each module create separate HTML entry with own dependencies.
*/
function createHTMLEntry(definition: IModuleDefinition): typeof HtmlWebpackPlugin {
const modulePath: string = path.resolve(RENDERER_MODULES_ROOT_PATH, definition.entry);
const moduleTemplatePath: string = path.resolve(modulePath, "index.hbs");
return new HtmlWebpackPlugin({
ENVIRONMENT,
chunks: [ "initialization", definition.name ],
chunksSortMode: "manual",
constants: {
APPLICATION_ROOT,
APPLICATION_TITLE: definition.title,
PORTAL_ROOT
},
favicon: BASE_PROJECT_FAVICON_PATH,
filename: `${definition.name}.html`,
inject: "body",
minify: {
collapseWhitespace: IS_PRODUCTION,
minifyCSS: true,
preserveLineBreaks: !IS_PRODUCTION,
quoteCharacter: "'",
removeComments: true,
removeTagWhitespace: true,
trimCustomFragments: true
},
template: fs.existsSync(moduleTemplatePath) ? moduleTemplatePath : BASE_PROJECT_TEMPLATE_PATH
});
}
/**
* Webpack plugins configuration.
*/
export const PLUGIN_CONFIG: {
PLUGINS: Configuration["plugins"];
OPTIMIZATION: Configuration["optimization"];
} = {
OPTIMIZATION: {
minimizer: [
new TerserPlugin({
terserOptions: {
compress: {
"drop_console": IS_PRODUCTION,
ecma: 5,
passes: IS_PRODUCTION ? 5 : 1
},
output: {
beautify: !IS_PRODUCTION,
ecma: 5
}
}
})
],
moduleIds: "deterministic",
emitOnErrors: !IS_PRODUCTION,
runtimeChunk: "single",
splitChunks: {
cacheGroups: {
"core": {
priority: 100,
reuseExistingChunk: false,
test: new RegExp(
`/node_modules/(${ | },
"vendor": {
priority: 70,
reuseExistingChunk: false,
test: /\/src\/node_modules\//
},
...createChunkCacheGroups(MODULES_CONFIG.modules),
"shared": {
priority: 10,
reuseExistingChunk: true,
test: /node_modules/
}
},
chunks: "all",
maxAsyncRequests: 50,
maxInitialRequests: 25,
maxSize: 300 * 1000,
minSize: 5 * 1000
}
},
PLUGINS: [
/**
* Generate HTML for each module.
* Maintain separate submodule with own base template for each application.
*/
...MODULES_CONFIG.modules.map(createHTMLEntry),
new DuplicatePackageCheckerPlugin({ verbose: true }),
new DotEnv({ path: DOTENV_CONFIG_PATH }),
new DefinePlugin(RUNTIME_CONSTANTS),
new ProvidePlugin({ React: "react" }),
new CopyWebpackPlugin({
patterns: BASE_PROJECT_STATIC_FILES.map((it: string) => ({ from: it, to: "." }))
}),
new ForkTsCheckerWebpackPlugin({
eslint: {
enabled: true,
files: path.resolve(PROJECT_ROOT_PATH, "./src/**/*.{ts,tsx,js,jsx}"),
options: {
configFile: ESLINT_CONFIG_PATH,
ignorePath: ESLINT_IGNORE_PATH
}
},
typescript: {
enabled: true,
configFile: TS_CONFIG_PATH
}
})
]
.concat(IS_PRODUCTION ? [] : [
new SourceMapDevToolPlugin({
filename: "source_maps/[base].map[query]",
publicPath: BACKEND_PUBLIC_PATH,
fileContext: "public"
})
])
.concat(ANALYZE_ENABLED ? [
new BundleAnalyzerPlugin({
analyzerMode: "static",
defaultSizes: "gzip",
openAnalyzer: false,
reportFilename: REPORT_BUNDLE_ANALYZER_PATH
}),
new StatsWriterPlugin({
filename: REPORT_BUNDLE_STATS_PATH,
stats: {
all: true,
assets: true,
assetsByChunkName: true,
children: false,
chunks: false,
entrypoints: true,
hash: true,
logging: false,
modules: false,
namedChunkGroups: false,
outputPath: false,
publicPath: false,
version: false
}
})
]: [])
.concat(DEV_SERVER_REFRESH ? [
new ReactRefreshWebpackPlugin({
exclude: [
/\/application\/initialization/,
/node_modules/
]
})
] :[])
}; | PROJECT_CORE_DEPENDENCIES.reduce((accumulator: string, it: string) =>
accumulator ? accumulator + "|" + it : it)
})/`
) |
config.go | package config
import (
"encoding/json"
"flag"
"fmt"
"os"
"reflect"
"strings"
"github.com/buildbuddy-io/buildbuddy/server/util/alert"
"gopkg.in/yaml.v2"
"github.com/buildbuddy-io/buildbuddy/server/util/log"
)
// When adding new storage fields, always be explicit about their yaml field
// name.
type generalConfig struct {
Org OrgConfig `yaml:"org"`
Integrations integrationsConfig `yaml:"integrations"`
Github GithubConfig `yaml:"github"`
API APIConfig `yaml:"api"`
Storage storageConfig `yaml:"storage"`
SSL SSLConfig `yaml:"ssl"`
Auth authConfig `yaml:"auth"`
RemoteExecution RemoteExecutionConfig `yaml:"remote_execution"`
BuildEventProxy buildEventProxy `yaml:"build_event_proxy"`
App appConfig `yaml:"app"`
Database DatabaseConfig `yaml:"database"`
Cache cacheConfig `yaml:"cache"`
Executor ExecutorConfig `yaml:"executor"`
}
type appConfig struct {
BuildBuddyURL string `yaml:"build_buddy_url" usage:"The external URL where your BuildBuddy instance can be found."`
EventsAPIURL string `yaml:"events_api_url" usage:"Overrides the default build event protocol gRPC address shown by BuildBuddy on the configuration screen."`
CacheAPIURL string `yaml:"cache_api_url" usage:"Overrides the default remote cache protocol gRPC address shown by BuildBuddy on the configuration screen."`
RemoteExecutionAPIURL string `yaml:"remote_execution_api_url" usage:"Overrides the default remote execution protocol gRPC address shown by BuildBuddy on the configuration screen."`
LogLevel string `yaml:"log_level" usage:"The desired log level. Logs with a level >= this level will be emitted. One of {'fatal', 'error', 'warn', 'info', 'debug'}"`
GRPCMaxRecvMsgSizeBytes int `yaml:"grpc_max_recv_msg_size_bytes" usage:"Configures the max GRPC receive message size [bytes]"`
GRPCOverHTTPPortEnabled bool `yaml:"grpc_over_http_port_enabled" usage:"Cloud-Only"`
AddUserToDomainGroup bool `yaml:"add_user_to_domain_group" usage:"Cloud-Only"`
DefaultToDenseMode bool `yaml:"default_to_dense_mode" usage:"Enables the dense UI mode by default."`
CreateGroupPerUser bool `yaml:"create_group_per_user" usage:"Cloud-Only"`
EnableTargetTracking bool `yaml:"enable_target_tracking" usage:"Cloud-Only"`
EnableStructuredLogging bool `yaml:"enable_structured_logging" usage:"If true, log messages will be json-formatted."`
LogIncludeShortFileName bool `yaml:"log_include_short_file_name" usage:"If true, log messages will include shortened originating file name."`
NoDefaultUserGroup bool `yaml:"no_default_user_group" usage:"Cloud-Only"`
LogEnableGCPLoggingFormat bool `yaml:"log_enable_gcp_logging_format" usage:"If true, the output structured logs will be compatible with format expected by GCP Logging."`
LogErrorStackTraces bool `yaml:"log_error_stack_traces" usage:"If true, stack traces will be printed for errors that have them."`
TraceProjectID string `yaml:"trace_project_id" usage:"Optional GCP project ID to export traces to. If not specified, determined from default credentials or metadata server if running on GCP."`
TraceJaegerCollector string `yaml:"trace_jaeger_collector" usage:"Address of the Jager collector endpoint where traces will be sent."`
TraceServiceName string `yaml:"trace_service_name" usage:"Name of the service to associate with traces."`
TraceFraction float64 `yaml:"trace_fraction" usage:"Fraction of requests to sample for tracing."`
TraceFractionOverrides []string `yaml:"trace_fraction_overrides" usage:"Tracing fraction override based on name in format name=fraction."`
IgnoreForcedTracingHeader bool `yaml:"ignore_forced_tracing_header" usage:"If set, we will not honor the forced tracing header."`
CodeEditorEnabled bool `yaml:"code_editor_enabled" usage:"If set, code editor functionality will be enabled."`
}
type buildEventProxy struct {
Hosts []string `yaml:"hosts" usage:"The list of hosts to pass build events onto."` |
type DatabaseConfig struct {
DataSource string `yaml:"data_source" usage:"The SQL database to connect to, specified as a connection string."`
ReadReplica string `yaml:"read_replica" usage:"A secondary, read-only SQL database to connect to, specified as a connection string."`
StatsPollInterval string `yaml:"stats_poll_interval" usage:"How often to poll the DB client for connection stats (default: '5s')."`
MaxOpenConns int `yaml:"max_open_conns" usage:"The maximum number of open connections to maintain to the db"`
MaxIdleConns int `yaml:"max_idle_conns" usage:"The maximum number of idle connections to maintain to the db"`
ConnMaxLifetimeSeconds int `yaml:"conn_max_lifetime_seconds" usage:"The maximum lifetime of a connection to the db"`
LogQueries bool `yaml:"log_queries" usage:"If true, log all queries"`
}
type storageConfig struct {
Disk DiskConfig `yaml:"disk"`
GCS GCSConfig `yaml:"gcs"`
AwsS3 AwsS3Config `yaml:"aws_s3"`
TTLSeconds int `yaml:"ttl_seconds" usage:"The time, in seconds, to keep invocations before deletion"`
ChunkFileSizeBytes int `yaml:"chunk_file_size_bytes" usage:"How many bytes to buffer in memory before flushing a chunk of build protocol data to disk."`
EnableChunkedEventLogs bool `yaml:"enable_chunked_event_logs" usage:"If true, Event logs will be stored separately from the invocation proto in chunks."`
}
type DiskCachePartition struct {
ID string `yaml:"id" json:"id" usage:"The ID of the partition."`
MaxSizeBytes int64 `yaml:"max_size" json:"max_size_bytes" usage:"Maximum size of the partition."`
}
type DiskCachePartitionMapping struct {
GroupID string `yaml:"group_id" json:"group_id" usage:"The Group ID to which this mapping applies."`
Prefix string `yaml:"prefix" json:"prefix" usage:"The remote instance name prefix used to select this partition."`
PartitionID string `yaml:"partition_id" json:"partition_id" usage:"The partition to use if the Group ID and prefix match."`
}
type DiskConfig struct {
RootDirectory string `yaml:"root_directory" usage:"The root directory to store all blobs in, if using disk based storage."`
Partitions []DiskCachePartition `yaml:"partitions"`
PartitionMappings []DiskCachePartitionMapping `yaml:"partition_mappings"`
}
type GCSConfig struct {
Bucket string `yaml:"bucket" usage:"The name of the GCS bucket to store build artifact files in."`
CredentialsFile string `yaml:"credentials_file" usage:"A path to a JSON credentials file that will be used to authenticate to GCS."`
ProjectID string `yaml:"project_id" usage:"The Google Cloud project ID of the project owning the above credentials and GCS bucket."`
}
type AwsS3Config struct {
Region string `yaml:"region" usage:"The AWS region."`
Bucket string `yaml:"bucket" usage:"The AWS S3 bucket to store files in."`
CredentialsProfile string `yaml:"credentials_profile" usage:"A custom credentials profile to use."`
// Useful for configuring MinIO: https://docs.min.io/docs/how-to-use-aws-sdk-for-go-with-minio-server.html
Endpoint string `yaml:"endpoint" usage:"The AWS endpoint to use, useful for configuring the use of MinIO."`
StaticCredentialsID string `yaml:"static_credentials_id" usage:"Static credentials ID to use, useful for configuring the use of MinIO."`
StaticCredentialsSecret string `yaml:"static_credentials_secret" usage:"Static credentials secret to use, useful for configuring the use of MinIO."`
StaticCredentialsToken string `yaml:"static_credentials_token" usage:"Static credentials token to use, useful for configuring the use of MinIO."`
DisableSSL bool `yaml:"disable_ssl" usage:"Disables the use of SSL, useful for configuring the use of MinIO."`
S3ForcePathStyle bool `yaml:"s3_force_path_style" usage:"Force path style urls for objects, useful for configuring the use of MinIO."`
}
type integrationsConfig struct {
Slack SlackConfig `yaml:"slack"`
}
type SlackConfig struct {
WebhookURL string `yaml:"webhook_url" usage:"A Slack webhook url to post build update messages to."`
}
type GCSCacheConfig struct {
Bucket string `yaml:"bucket" usage:"The name of the GCS bucket to store cache files in."`
CredentialsFile string `yaml:"credentials_file" usage:"A path to a JSON credentials file that will be used to authenticate to GCS."`
ProjectID string `yaml:"project_id" usage:"The Google Cloud project ID of the project owning the above credentials and GCS bucket."`
TTLDays int64 `yaml:"ttl_days" usage:"The period after which cache files should be TTLd. Disabled if 0."`
}
type S3CacheConfig struct {
Region string `yaml:"region" usage:"The AWS region."`
Bucket string `yaml:"bucket" usage:"The AWS S3 bucket to store files in."`
CredentialsProfile string `yaml:"credentials_profile" usage:"A custom credentials profile to use."`
TTLDays int64 `yaml:"ttl_days" usage:"The period after which cache files should be TTLd. Disabled if 0."`
// Useful for configuring MinIO: https://docs.min.io/docs/how-to-use-aws-sdk-for-go-with-minio-server.html
Endpoint string `yaml:"endpoint" usage:"The AWS endpoint to use, useful for configuring the use of MinIO."`
StaticCredentialsID string `yaml:"static_credentials_id" usage:"Static credentials ID to use, useful for configuring the use of MinIO."`
StaticCredentialsSecret string `yaml:"static_credentials_secret" usage:"Static credentials secret to use, useful for configuring the use of MinIO."`
StaticCredentialsToken string `yaml:"static_credentials_token" usage:"Static credentials token to use, useful for configuring the use of MinIO."`
DisableSSL bool `yaml:"disable_ssl" usage:"Disables the use of SSL, useful for configuring the use of MinIO."`
S3ForcePathStyle bool `yaml:"s3_force_path_style" usage:"Force path style urls for objects, useful for configuring the use of MinIO."`
}
type DistributedCacheConfig struct {
ListenAddr string `yaml:"listen_addr" usage:"The address to listen for local BuildBuddy distributed cache traffic on."`
RedisTarget string `yaml:"redis_target" usage:"A redis target for improved Caching/RBE performance. Target can be provided as either a redis connection URI or a host:port pair. URI schemas supported: redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise only **"`
GroupName string `yaml:"group_name" usage:"A unique name for this distributed cache group. ** Enterprise only **"`
Nodes []string `yaml:"nodes" usage:"The hardcoded list of peer distributed cache nodes. If this is set, redis_target will be ignored. ** Enterprise only **"`
ReplicationFactor int `yaml:"replication_factor" usage:"How many total servers the data should be replicated to. Must be >= 1. ** Enterprise only **"`
ClusterSize int `yaml:"cluster_size" usage:"The total number of nodes in this cluster. Required for health checking. ** Enterprise only **"`
}
type RedisCacheConfig struct {
RedisTarget string `yaml:"redis_target" usage:"A redis target for improved Caching/RBE performance. Target can be provided as either a redis connection URI or a host:port pair. URI schemas supported: redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise only **"`
MaxValueSizeBytes int64 `yaml:"max_value_size_bytes" usage:"The maximum value size to cache in redis (in bytes)."`
}
type cacheConfig struct {
Disk DiskConfig `yaml:"disk"`
RedisTarget string `yaml:"redis_target" usage:"A redis target for improved Caching/RBE performance. Target can be provided as either a redis connection URI or a host:port pair. URI schemas supported: redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise only **"`
S3 S3CacheConfig `yaml:"s3"`
GCS GCSCacheConfig `yaml:"gcs"`
MemcacheTargets []string `yaml:"memcache_targets" usage:"Deprecated. Use Redis Target instead."`
Redis RedisCacheConfig `yaml:"redis"`
DistributedCache DistributedCacheConfig `yaml:"distributed_cache"`
MaxSizeBytes int64 `yaml:"max_size_bytes" usage:"How big to allow the cache to be (in bytes)."`
InMemory bool `yaml:"in_memory" usage:"Whether or not to use the in_memory cache."`
}
type authConfig struct {
JWTKey string `yaml:"jwt_key" usage:"The key to use when signing JWT tokens."`
APIKeyGroupCacheTTL string `yaml:"api_key_group_cache_ttl" usage:"Override for the TTL for API Key to Group caching. Set to '0' to disable cache."`
OauthProviders []OauthProvider `yaml:"oauth_providers"`
EnableAnonymousUsage bool `yaml:"enable_anonymous_usage" usage:"If true, unauthenticated build uploads will still be allowed but won't be associated with your organization."`
}
type OauthProvider struct {
IssuerURL string `yaml:"issuer_url" usage:"The issuer URL of this OIDC Provider."`
ClientID string `yaml:"client_id" usage:"The oauth client ID."`
ClientSecret string `yaml:"client_secret" usage:"The oauth client secret."`
}
type SSLConfig struct {
CertFile string `yaml:"cert_file" usage:"Path to a PEM encoded certificate file to use for TLS if not using ACME."`
KeyFile string `yaml:"key_file" usage:"Path to a PEM encoded key file to use for TLS if not using ACME."`
ClientCACertFile string `yaml:"client_ca_cert_file" usage:"Path to a PEM encoded certificate authority file used to issue client certificates for mTLS auth."`
ClientCAKeyFile string `yaml:"client_ca_key_file" usage:"Path to a PEM encoded certificate authority key file used to issue client certificates for mTLS auth."`
HostWhitelist []string `yaml:"host_whitelist" usage:"Cloud-Only"`
EnableSSL bool `yaml:"enable_ssl" usage:"Whether or not to enable SSL/TLS on gRPC connections (gRPCS)."`
UpgradeInsecure bool `yaml:"upgrade_insecure" usage:"True if http requests should be redirected to https"`
UseACME bool `yaml:"use_acme" usage:"Whether or not to automatically configure SSL certs using ACME. If ACME is enabled, cert_file and key_file should not be set."`
DefaultHost string `yaml:"default_host" usage:"Host name to use for ACME generated cert if TLS request does not contain SNI."`
}
type RemoteExecutionConfig struct {
DefaultPoolName string `yaml:"default_pool_name" usage:"The default executor pool to use if one is not specified."`
EnableWorkflows bool `yaml:"enable_workflows" usage:"Whether to enable BuildBuddy workflows."`
WorkflowsPoolName string `yaml:"workflows_pool_name" usage:"The executor pool to use for workflow actions. Defaults to the default executor pool if not specified."`
WorkflowsDefaultImage string `yaml:"workflows_default_image" usage:"The default docker image to use for running workflows."`
WorkflowsCIRunnerDebug bool `yaml:"workflows_ci_runner_debug" usage:"Whether to run the CI runner in debug mode."`
WorkflowsCIRunnerBazelCommand string `yaml:"workflows_ci_runner_bazel_command" usage:"Bazel command to be used by the CI runner."`
RedisTarget string `yaml:"redis_target" usage:"A Redis target for storing remote execution state. Required for remote execution. To ease migration, the redis target from the cache config will be used if this value is not specified."`
SharedExecutorPoolGroupID string `yaml:"shared_executor_pool_group_id" usage:"Group ID that owns the shared executor pool."`
RedisPubSubPoolSize int `yaml:"redis_pubsub_pool_size" usage:"Maximum number of connections used for waiting for execution updates."`
EnableRemoteExec bool `yaml:"enable_remote_exec" usage:"If true, enable remote-exec. ** Enterprise only **"`
RequireExecutorAuthorization bool `yaml:"require_executor_authorization" usage:"If true, executors connecting to this server must provide a valid executor API key."`
EnableUserOwnedExecutors bool `yaml:"enable_user_owned_executors" usage:"If enabled, users can register their own executors with the scheduler."`
EnableExecutorKeyCreation bool `yaml:"enable_executor_key_creation" usage:"If enabled, UI will allow executor keys to be created."`
}
type ExecutorConfig struct {
AppTarget string `yaml:"app_target" usage:"The GRPC url of a buildbuddy app server."`
RootDirectory string `yaml:"root_directory" usage:"The root directory to use for build files."`
LocalCacheDirectory string `yaml:"local_cache_directory" usage:"A local on-disk cache directory. Must be on the same device (disk partition, Docker volume, etc.) as the configured root_directory, since files are hard-linked to this cache for performance reasons. Otherwise, 'Invalid cross-device link' errors may result."`
LocalCacheSizeBytes int64 `yaml:"local_cache_size_bytes" usage:"The maximum size, in bytes, to use for the local on-disk cache"`
DisableLocalCache bool `yaml:"disable_local_cache" usage:"If true, a local file cache will not be used."`
DockerSocket string `yaml:"docker_socket" usage:"If set, run execution commands in docker using the provided socket."`
APIKey string `yaml:"api_key" usage:"API Key used to authorize the executor with the BuildBuddy app server."`
ContainerdSocket string `yaml:"containerd_socket" usage:"(UNSTABLE) If set, run execution commands in containerd using the provided socket."`
DockerMountMode string `yaml:"docker_mount_mode" usage:"Sets the mount mode of volumes mounted to docker images. Useful if running on SELinux https://www.projectatomic.io/blog/2015/06/using-volumes-with-docker-can-cause-problems-with-selinux/"`
RunnerPool RunnerPoolConfig `yaml:"runner_pool"`
DockerNetHost bool `yaml:"docker_net_host" usage:"Sets --net=host on the docker command. Intended for local development only."`
DockerSiblingContainers bool `yaml:"docker_sibling_containers" usage:"If set, mount the configured Docker socket to containers spawned for each action, to enable Docker-out-of-Docker (DooD). Takes effect only if docker_socket is also set. Should not be set by executors that can run untrusted code."`
DefaultXCodeVersion string `yaml:"default_xcode_version" usage:"Sets the default XCode version number to use if an action doesn't specify one. If not set, /Applications/Xcode.app/ is used."`
}
func (c *ExecutorConfig) GetAppTarget() string {
if c.AppTarget == "" {
return "grpcs://cloud.buildbuddy.io"
}
return c.AppTarget
}
func (c *ExecutorConfig) GetRootDirectory() string {
if c.RootDirectory == "" {
return "/tmp/buildbuddy/remote_build"
}
return c.RootDirectory
}
func (c *ExecutorConfig) GetLocalCacheDirectory() string {
if c.DisableLocalCache {
return ""
}
if c.LocalCacheDirectory == "" {
return "/tmp/buildbuddy/filecache"
}
return c.LocalCacheDirectory
}
func (c *ExecutorConfig) GetLocalCacheSizeBytes() int64 {
if c.DisableLocalCache {
return 0
}
if c.LocalCacheSizeBytes == 0 {
return 1_000_000_000 // 1 GB
}
return c.LocalCacheSizeBytes
}
type RunnerPoolConfig struct {
MaxRunnerCount int `yaml:"max_runner_count" usage:"Maximum number of recycled RBE runners that can be pooled at once. Defaults to a value derived from estimated CPU usage, max RAM, allocated CPU, and allocated memory."`
MaxRunnerDiskSizeBytes int64 `yaml:"max_runner_disk_size_bytes" usage:"Maximum disk size for a recycled runner; runners exceeding this threshold are not recycled. Defaults to 16GB."`
MaxRunnerMemoryUsageBytes int64 `yaml:"max_runner_memory_usage_bytes" usage:"Maximum memory usage for a recycled runner; runners exceeding this threshold are not recycled. Defaults to 1/10 of total RAM allocated to the executor. (Only supported for Docker-based executors)."`
}
type APIConfig struct {
APIKey string `yaml:"api_key" usage:"The default API key to use for on-prem enterprise deploys with a single organization/group."`
EnableAPI bool `yaml:"enable_api" usage:"Whether or not to enable the BuildBuddy API."`
}
type GithubConfig struct {
ClientID string `yaml:"client_id" usage:"The client ID of your GitHub Oauth App. ** Enterprise only **"`
ClientSecret string `yaml:"client_secret" usage:"The client secret of your GitHub Oauth App. ** Enterprise only **"`
AccessToken string `yaml:"access_token" usage:"The GitHub access token used to post GitHub commit statuses. ** Enterprise only **"`
StatusNameSuffix string `yaml:"status_name_suffix" usage:"Suffix to be appended to all reported GitHub status names. Useful for differentiating BuildBuddy deployments. For example: '(dev)' ** Enterprise only **"`
StatusPerTestTarget bool `yaml:"status_per_test_target" usage:"If true, report status per test target. ** Enterprise only **"`
}
type OrgConfig struct {
Name string `yaml:"name" usage:"The name of your organization, which is displayed on your organization's build history."`
Domain string `yaml:"domain" usage:"Your organization's email domain. If this is set, only users with email addresses in this domain will be able to register for a BuildBuddy account."`
}
var sharedGeneralConfig generalConfig
type stringSliceFlag []string
func (i *stringSliceFlag) String() string {
return strings.Join(*i, ",")
}
// NOTE: string slice flags are *appended* to the values in the YAML,
// instead of overriding them completely.
func (i *stringSliceFlag) Set(values string) error {
for _, val := range strings.Split(values, ",") {
*i = append(*i, val)
}
return nil
}
type structSliceFlag struct {
dstSlice reflect.Value
structType reflect.Type
}
func (f *structSliceFlag) String() string {
if *f == (structSliceFlag{}) {
return "[]"
}
var l []string
for i := 0; i < f.dstSlice.Len(); i++ {
b, err := json.Marshal(f.dstSlice.Index(i).Interface())
if err != nil {
alert.UnexpectedEvent("config_cannot_marshal_struct", "err: %s", err)
continue
}
l = append(l, string(b))
}
return "[" + strings.Join(l, ",") + "]"
}
func (f *structSliceFlag) Set(value string) error {
dst := reflect.New(f.structType)
if err := json.Unmarshal([]byte(value), dst.Interface()); err != nil {
return err
}
f.dstSlice.Set(reflect.Append(f.dstSlice, dst.Elem()))
return nil
}
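// Usage sketch (flag name shown for illustration): each occurrence of a
// struct-slice flag takes one JSON object matching the element type's json
// tags and appends it to the YAML-configured slice, e.g.
//
//	--storage.disk.partitions='{"id":"default","max_size_bytes":1000000000}'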
func defineFlagsForMembers(parentStructNames []string, T reflect.Value) {
typeOfT := T.Type()
for i := 0; i < T.NumField(); i++ {
f := T.Field(i)
fieldName := typeOfT.Field(i).Tag.Get("yaml")
docString := typeOfT.Field(i).Tag.Get("usage")
fqFieldName := strings.ToLower(strings.Join(append(parentStructNames, fieldName), "."))
switch f.Type().Kind() {
case reflect.Ptr:
log.Fatal("The config should not contain pointers!")
case reflect.Struct:
defineFlagsForMembers(append(parentStructNames, fieldName), f)
continue
case reflect.Bool:
flag.BoolVar(f.Addr().Interface().(*bool), fqFieldName, f.Bool(), docString)
case reflect.String:
flag.StringVar(f.Addr().Interface().(*string), fqFieldName, f.String(), docString)
case reflect.Int:
flag.IntVar(f.Addr().Interface().(*int), fqFieldName, int(f.Int()), docString)
case reflect.Int64:
flag.Int64Var(f.Addr().Interface().(*int64), fqFieldName, int64(f.Int()), docString)
case reflect.Float64:
flag.Float64Var(f.Addr().Interface().(*float64), fqFieldName, f.Float(), docString)
case reflect.Slice:
if f.Type().Elem().Kind() == reflect.String {
if slice, ok := f.Interface().([]string); ok {
sf := stringSliceFlag(slice)
flag.Var(&sf, fqFieldName, docString)
}
continue
} else if f.Type().Elem().Kind() == reflect.Struct {
sf := structSliceFlag{f, f.Type().Elem()}
flag.Var(&sf, fqFieldName, docString)
continue
}
fallthrough
default:
// We know this is not flag compatible and it's here for
// long-term support reasons, so don't warn about it.
if fqFieldName == "auth.oauth_providers" {
continue
}
log.Warningf("Skipping flag: --%s, kind: %s", fqFieldName, f.Type().Kind())
}
}
}
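// Illustrative example: nested fields are exposed as dot-separated flag names
// built from their yaml tags, so App.BuildBuddyURL (yaml "build_buddy_url"
// inside "app") becomes --app.build_buddy_url; because readConfig re-runs
// flag.Parse after loading the YAML, the command-line value wins.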
// Register flags too.
func init() {
defineFlagsForMembers([]string{}, reflect.ValueOf(&sharedGeneralConfig).Elem())
}
func readConfig(fullConfigPath string) (*generalConfig, error) {
if fullConfigPath == "" {
return &sharedGeneralConfig, nil
}
log.Infof("Reading buildbuddy config from '%s'", fullConfigPath)
_, err := os.Stat(fullConfigPath)
// If the file does not exist then we are SOL.
if os.IsNotExist(err) {
return nil, fmt.Errorf("Config file %s not found", fullConfigPath)
}
fileBytes, err := os.ReadFile(fullConfigPath)
if err != nil {
return nil, fmt.Errorf("Error reading config file: %s", err)
}
// expand environment variables
expandedFileBytes := []byte(os.ExpandEnv(string(fileBytes)))
if err := yaml.Unmarshal([]byte(expandedFileBytes), &sharedGeneralConfig); err != nil {
return nil, fmt.Errorf("Error parsing config file: %s", err)
}
// Re-parse flags so that they override the YAML config values.
flag.Parse()
return &sharedGeneralConfig, nil
}
type Configurator struct {
gc *generalConfig
fullConfigPath string
}
func NewConfigurator(configFilePath string) (*Configurator, error) {
conf, err := readConfig(configFilePath)
if err != nil {
return nil, err
}
return &Configurator{
fullConfigPath: configFilePath,
gc: conf,
}, nil
}
func (c *Configurator) GetStorageEnableChunkedEventLogs() bool {
return c.gc.Storage.EnableChunkedEventLogs
}
func (c *Configurator) GetStorageTTLSeconds() int {
return c.gc.Storage.TTLSeconds
}
func (c *Configurator) GetStorageChunkFileSizeBytes() int {
return c.gc.Storage.ChunkFileSizeBytes
}
func (c *Configurator) GetStorageDiskRootDir() string {
return c.gc.Storage.Disk.RootDirectory
}
func (c *Configurator) GetStorageGCSConfig() *GCSConfig {
return &c.gc.Storage.GCS
}
func (c *Configurator) GetStorageAWSS3Config() *AwsS3Config {
return &c.gc.Storage.AwsS3
}
func (c *Configurator) GetDatabaseConfig() *DatabaseConfig {
return &c.gc.Database
}
func (c *Configurator) GetDBDataSource() string {
return c.gc.Database.DataSource
}
func (c *Configurator) GetDBReadReplica() string {
return c.gc.Database.ReadReplica
}
func (c *Configurator) GetAppBuildBuddyURL() string {
return c.gc.App.BuildBuddyURL
}
func (c *Configurator) GetAppEventsAPIURL() string {
return c.gc.App.EventsAPIURL
}
func (c *Configurator) GetAppCacheAPIURL() string {
return c.gc.App.CacheAPIURL
}
func (c *Configurator) GetAppRemoteExecutionAPIURL() string {
return c.gc.App.RemoteExecutionAPIURL
}
func (c *Configurator) GetAppNoDefaultUserGroup() bool {
return c.gc.App.NoDefaultUserGroup
}
func (c *Configurator) GetAppCreateGroupPerUser() bool {
return c.gc.App.CreateGroupPerUser
}
func (c *Configurator) GetAppAddUserToDomainGroup() bool {
return c.gc.App.AddUserToDomainGroup
}
func (c *Configurator) GetAppLogIncludeShortFileName() bool {
return c.gc.App.LogIncludeShortFileName
}
func (c *Configurator) GetAppLogErrorStackTraces() bool {
return c.gc.App.LogErrorStackTraces
}
func (c *Configurator) GetAppEnableStructuredLogging() bool {
return c.gc.App.EnableStructuredLogging
}
func (c *Configurator) GetAppLogLevel() string {
return c.gc.App.LogLevel
}
func (c *Configurator) GetAppLogEnableGCPLoggingFormat() bool {
return c.gc.App.LogEnableGCPLoggingFormat
}
func (c *Configurator) GetGRPCOverHTTPPortEnabled() bool {
return c.gc.App.GRPCOverHTTPPortEnabled
}
func (c *Configurator) GetDefaultToDenseMode() bool {
return c.gc.App.DefaultToDenseMode
}
func (c *Configurator) GetCodeEditorEnabled() bool {
return c.gc.App.CodeEditorEnabled
}
func (c *Configurator) GetGRPCMaxRecvMsgSizeBytes() int {
n := c.gc.App.GRPCMaxRecvMsgSizeBytes
if n == 0 {
// Support large BEP messages: https://github.com/bazelbuild/bazel/issues/12050
return 50000000
}
return n
}
func (c *Configurator) EnableTargetTracking() bool {
return c.gc.App.EnableTargetTracking
}
func (c *Configurator) GetIntegrationsSlackConfig() *SlackConfig {
return &c.gc.Integrations.Slack
}
func (c *Configurator) GetBuildEventProxyHosts() []string {
return c.gc.BuildEventProxy.Hosts
}
func (c *Configurator) GetBuildEventProxyBufferSize() int {
return c.gc.BuildEventProxy.BufferSize
}
func (c *Configurator) GetCacheMaxSizeBytes() int64 {
return c.gc.Cache.MaxSizeBytes
}
func (c *Configurator) GetCacheDiskConfig() *DiskConfig {
if c.gc.Cache.Disk.RootDirectory != "" {
return &c.gc.Cache.Disk
}
return nil
}
func (c *Configurator) GetCacheGCSConfig() *GCSCacheConfig {
if c.gc.Cache.GCS.Bucket != "" {
return &c.gc.Cache.GCS
}
return nil
}
func (c *Configurator) GetCacheS3Config() *S3CacheConfig {
if c.gc.Cache.S3.Bucket != "" {
return &c.gc.Cache.S3
}
return nil
}
func (c *Configurator) GetDistributedCacheConfig() *DistributedCacheConfig {
if c.gc.Cache.DistributedCache.ListenAddr != "" {
return &c.gc.Cache.DistributedCache
}
return nil
}
func (c *Configurator) GetCacheMemcacheTargets() []string {
return c.gc.Cache.MemcacheTargets
}
func (c *Configurator) GetCacheRedisTarget() string {
// Prefer the target from the Redis sub-config, if present.
if redisConfig := c.GetCacheRedisConfig(); redisConfig != nil {
return redisConfig.RedisTarget
}
return c.gc.Cache.RedisTarget
}
func (c *Configurator) GetCacheRedisConfig() *RedisCacheConfig {
if c.gc.Cache.Redis.RedisTarget != "" {
return &c.gc.Cache.Redis
}
return nil
}
func (c *Configurator) GetCacheInMemory() bool {
return c.gc.Cache.InMemory
}
func (c *Configurator) GetAnonymousUsageEnabled() bool {
return len(c.gc.Auth.OauthProviders) == 0 || c.gc.Auth.EnableAnonymousUsage
}
func (c *Configurator) GetAuthJWTKey() string {
return c.gc.Auth.JWTKey
}
func (c *Configurator) GetAuthOauthProviders() []OauthProvider {
op := c.gc.Auth.OauthProviders
if len(c.gc.Auth.OauthProviders) == 1 {
if cs := os.Getenv("BB_OAUTH_CLIENT_SECRET"); cs != "" {
op[0].ClientSecret = cs
}
}
return op
}
func (c *Configurator) GetAuthAPIKeyGroupCacheTTL() string {
return c.gc.Auth.APIKeyGroupCacheTTL
}
func (c *Configurator) GetSSLConfig() *SSLConfig {
if c.gc.SSL.EnableSSL {
return &c.gc.SSL
}
return nil
}
func (c *Configurator) GetRemoteExecutionConfig() *RemoteExecutionConfig {
if c.gc.RemoteExecution.EnableRemoteExec {
return &c.gc.RemoteExecution
}
return nil
}
func (c *Configurator) GetRemoteExecutionRedisTarget() string {
if rec := c.GetRemoteExecutionConfig(); rec != nil && rec.RedisTarget != "" {
return rec.RedisTarget
}
// Fall back to the cache redis target if redis target is not specified in remote execution config.
// Historically we did not have a separate redis target for remote execution.
return c.GetCacheRedisTarget()
}
func (c *Configurator) GetExecutorConfig() *ExecutorConfig {
return &c.gc.Executor
}
func (c *Configurator) GetAPIConfig() *APIConfig {
if c.gc.API.EnableAPI {
return &c.gc.API
}
return nil
}
func (c *Configurator) GetGithubConfig() *GithubConfig {
if c.gc.Github == (GithubConfig{}) {
return nil
}
ghc := c.gc.Github
if cs := os.Getenv("BB_GITHUB_CLIENT_SECRET"); cs != "" {
ghc.ClientSecret = cs
}
return &ghc
}
func (c *Configurator) GetOrgConfig() *OrgConfig {
if c.gc.Org.Name != "" || c.gc.Org.Domain != "" {
return &c.gc.Org
}
return nil
}
func (c *Configurator) GetTraceJaegerCollector() string {
return c.gc.App.TraceJaegerCollector
}
func (c *Configurator) GetTraceServiceName() string {
return c.gc.App.TraceServiceName
}
func (c *Configurator) GetTraceFraction() float64 {
return c.gc.App.TraceFraction
}
func (c *Configurator) GetTraceFractionOverrides() []string {
return c.gc.App.TraceFractionOverrides
}
func (c *Configurator) GetIgnoreForcedTracingHeader() bool {
return c.gc.App.IgnoreForcedTracingHeader
} | BufferSize int `yaml:"buffer_size" usage:"The number of build events to buffer locally when proxying build events."`
} |
doc.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
// Package webrisk is an auto-generated package for the
// Web Risk API.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit pkg.go.dev/cloud.google.com/go.
package webrisk // import "cloud.google.com/go/webrisk/apiv1"
import (
"context"
"os"
"runtime"
"strconv"
"strings"
"unicode"
"google.golang.org/api/option"
"google.golang.org/grpc/metadata"
)
// For more information on implementing a client constructor hook, see
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
const versionClient = "20210111"
func | (ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
func checkDisableDeadlines() (bool, error) {
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
if !ok {
return false, nil
}
b, err := strconv.ParseBool(raw)
return b, err
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
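// For example, "go1.19.3" maps to "1.19.3", "go1.20" maps to "1.20.0", and a
// development toolchain string like "devel +abc123 Tue ..." maps to "abc123".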
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
| insertMetadata |
util.go | // Copyright (c) 2020 Blockwatch Data Inc.
// Author: [email protected]
package etl
import (
"fmt"
"github.com/cespare/xxhash"
"blockwatch.cc/tzindex/chain"
"blockwatch.cc/tzindex/etl/model"
"blockwatch.cc/tzindex/micheline"
)
// Smart Contract Storage Access
var vestingContractBalancePath = []int{1, 0, 0, 0}
func GetVestingBalance(prim *micheline.Prim) (int64, error) {
if prim == nil {
return 0, nil
}
for i, v := range vestingContractBalancePath {
if len(prim.Args) < v+1 {
return 0, fmt.Errorf("non existing path at %v in vesting contract storage", vestingContractBalancePath[:i])
}
prim = prim.Args[v]
}
if prim.Type != micheline.PrimInt {
return 0, fmt.Errorf("unexpected prim type %s for vesting contract balance", prim.Type)
}
return prim.Int.Int64(), nil
}
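// Illustrative walk (no additional logic): with the path {1, 0, 0, 0} the loop
// above descends prim.Args[1].Args[0].Args[0].Args[0]; that leaf must be a
// Micheline int, whose value is returned as the vesting balance.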
func hashKey(typ chain.AddressType, h []byte) uint64 {
var buf [21]byte
buf[0] = byte(typ)
copy(buf[1:], h)
return xxhash.Sum64(buf[:])
}
func accountHashKey(a *model.Account) uint64 |
func addressHashKey(a chain.Address) uint64 {
return hashKey(a.Type, a.Hash)
}
| {
return hashKey(a.Type, a.Hash)
} |
solution.py | from __future__ import annotations
import collections
import logging
import typing
from aoc.helpers import Puzzle
__all__ = ["part_one", "part_two", "prepare_puzzle"]
log = logging.getLogger(__name__)
class Instruction(typing.NamedTuple):
"""A ConsoleApplication instruction."""
operation: str
argument: int
@classmethod
def from_text(cls, instruction: str) -> Instruction:
"""Parse a raw text instruction and return an Instruction instance."""
operation, raw_argument = instruction.split(" ")
return cls(operation=operation, argument=int(raw_argument))
class ApplicationState(typing.NamedTuple):
"""An application exit state."""
success: bool
value: int
class ConsoleApplication:
"""A virtual handheld game console."""
def __init__(self, instructions: dict[int, Instruction]) -> None:
"""Parse the instructions and load the application into memory."""
self.instructions = dict(instructions)
self.pointer = 0
self.accumulator = 0
@classmethod
def from_raw_instructions(
cls: type[ConsoleApplication],
instructions: list[str]
) -> ConsoleApplication:
"""Create an application from a raw instruction set."""
instructions = {
i: Instruction.from_text(instruction) for i, instruction in enumerate(instructions)
}
return cls(instructions=instructions)
def | (self) -> ConsoleApplication:
"""Create a copy of the application."""
return type(self)(self.instructions)
def run(self, debug_mode: bool = False) -> ApplicationState:
"""
Run the application and return the final accumulator value as the exit code.
If run in debug mode, the application returns whenever it detects it has
entered an infinite loop by keeping track of the instructions it has
executed previously.
"""
if debug_mode:
seen = set()
while True:
if self.pointer in seen:
return ApplicationState(success=False, value=self.accumulator)
if self.pointer == len(self.instructions):
return ApplicationState(success=True, value=self.accumulator)
seen.add(self.pointer)
self.step()
else:
while True:
self.step()
if self.pointer == len(self.instructions):
return ApplicationState(success=True, value=self.accumulator)
def step(self) -> None:
"""Perform a single step in the application."""
operation, argument = self.instructions[self.pointer]
getattr(self, operation)(argument)
def acc(self, value: int) -> None:
"""Add a `value` to the accumulator and increase the pointer by one."""
self.accumulator += value
self.pointer += 1
def jmp(self, steps: int) -> None:
"""Execute a jump to another instruction relative to its own location."""
self.pointer += steps
def nop(self, _argument: int) -> None:
"""Do not do anything at all except going to the next instruction."""
self.pointer += 1
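# Usage sketch (a hypothetical three-instruction program, not puzzle input):
#   app = ConsoleApplication.from_raw_instructions(["nop +0", "acc +1", "jmp -2"])
#   app.run(debug_mode=True)  # -> ApplicationState(success=False, value=1)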
def debugger(application: ConsoleApplication) -> int:
"""
Debug a ConsoleApplication by tracing terminating paths.
This debugger works by taking the followings steps:
1. For each instruction position, determine which instructions end up there;
2. Use the instruction targets to trace which instructions will end up at
the termination location;
3. Run the application, checking if an operation flip would make us jump
to a halting path target location.
It returns the final value after the application has halted successfully.
"""
# 1. For each instruction location, determine which instructions end up there.
instruction_destinations = collections.defaultdict(list)
for i, (instruction, value) in reversed(application.instructions.items()):
if instruction == "jmp":
instruction_destinations[i + value].append(i)
else:
instruction_destinations[i + 1].append(i)
# 2. Use the target locations of instructions to determine which
# instructions already lead naturally to the halting position.
targets = {len(application.instructions)}
targets_to_check = {len(application.instructions)}
while True:
new_targets = set()
for target in targets_to_check:
new_targets.update(instruction_destinations[target])
if not new_targets:
# No other instructions end up at an identified target instruction.
break
targets_to_check = new_targets
targets.update(new_targets)
# 3. Run the application, checking for each `jmp` or `nop` instruction if
# flipping it would result in the application hitting a target instruction.
debugged = False
while application.pointer != len(application.instructions):
operation, argument = application.instructions[application.pointer]
if not debugged and operation == "jmp" and application.pointer + 1 in targets:
application.pointer += 1
debugged = True
elif not debugged and operation == "nop" and application.pointer + argument in targets:
application.pointer += argument
debugged = True
else:
getattr(application, operation)(argument)
# Return the final value of the accumulator
return application.accumulator
def prepare_puzzle(puzzle: Puzzle) -> None:
"""Prepare the ConsoleApplication for today's puzzle."""
puzzle["application"] = ConsoleApplication.from_raw_instructions(puzzle.lines)
def part_one(puzzle: Puzzle) -> typing.Optional[typing.Union[str, int]]:
"""Return the solution for part one of this day."""
return puzzle["application"].run(debug_mode=True).value
def part_two(puzzle: Puzzle) -> typing.Optional[typing.Union[str, int]]:
"""Return the solution for part two of this day."""
return debugger(puzzle["application"].copy())
| copy |
vote.go | package types
import (
"bytes"
"errors"
"fmt"
"time"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/libs/protoio"
tmbytes "github.com/tendermint/tendermint/libs/bytes"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
const (
nilVoteStr string = "nil-Vote"
// The maximum supported number of bytes in a vote extension.
MaxVoteExtensionSize int = 1024 * 1024
)
var (
ErrVoteUnexpectedStep = errors.New("unexpected step")
ErrVoteInvalidValidatorIndex = errors.New("invalid validator index")
ErrVoteInvalidValidatorAddress = errors.New("invalid validator address")
ErrVoteInvalidSignature = errors.New("invalid signature")
ErrVoteInvalidBlockHash = errors.New("invalid block hash")
ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature")
ErrVoteNil = errors.New("nil vote")
ErrVoteExtensionAbsent = errors.New("vote extension absent")
)
type ErrVoteConflictingVotes struct {
VoteA *Vote
VoteB *Vote
}
func (err *ErrVoteConflictingVotes) Error() string {
return fmt.Sprintf("conflicting votes from validator %X", err.VoteA.ValidatorAddress)
}
func NewConflictingVoteError(vote1, vote2 *Vote) *ErrVoteConflictingVotes {
return &ErrVoteConflictingVotes{
VoteA: vote1,
VoteB: vote2,
}
}
// Address is hex bytes.
type Address = crypto.Address
// Vote represents a prevote, precommit, or commit vote from validators for
// consensus.
type Vote struct {
Type tmproto.SignedMsgType `json:"type"`
Height int64 `json:"height,string"`
Round int32 `json:"round"` // assume there will not be greater than 2_147_483_647 rounds
BlockID BlockID `json:"block_id"` // zero if vote is nil.
Timestamp time.Time `json:"timestamp"`
ValidatorAddress Address `json:"validator_address"`
ValidatorIndex int32 `json:"validator_index"`
Signature []byte `json:"signature"`
Extension []byte `json:"extension"`
ExtensionSignature []byte `json:"extension_signature"`
}
// VoteFromProto attempts to convert the given serialization (Protobuf) type to
// our Vote domain type. No validation is performed on the resulting vote -
// this is left up to the caller to decide whether to call ValidateBasic or
// ValidateWithExtension.
func VoteFromProto(pv *tmproto.Vote) (*Vote, error) {
blockID, err := BlockIDFromProto(&pv.BlockID)
if err != nil {
return nil, err
}
return &Vote{
Type: pv.Type,
Height: pv.Height,
Round: pv.Round,
BlockID: *blockID,
Timestamp: pv.Timestamp,
ValidatorAddress: pv.ValidatorAddress,
ValidatorIndex: pv.ValidatorIndex,
Signature: pv.Signature,
Extension: pv.Extension,
ExtensionSignature: pv.ExtensionSignature,
}, nil
}
// CommitSig converts the Vote to a CommitSig.
func (vote *Vote) CommitSig() CommitSig {
if vote == nil {
return NewCommitSigAbsent()
}
var blockIDFlag BlockIDFlag
switch {
case vote.BlockID.IsComplete():
blockIDFlag = BlockIDFlagCommit
case vote.BlockID.IsNil():
blockIDFlag = BlockIDFlagNil
default:
panic(fmt.Sprintf("Invalid vote %v - expected BlockID to be either empty or complete", vote))
}
return CommitSig{
BlockIDFlag: blockIDFlag,
ValidatorAddress: vote.ValidatorAddress,
Timestamp: vote.Timestamp,
Signature: vote.Signature,
}
}
// StripExtension removes any extension data from the vote. Useful if the
// chain has not enabled vote extensions.
// Returns true if extension data was present before stripping and false otherwise.
func (vote *Vote) StripExtension() bool {
stripped := len(vote.Extension) > 0 || len(vote.ExtensionSignature) > 0
vote.Extension = nil
vote.ExtensionSignature = nil
return stripped
}
// ExtendedCommitSig attempts to construct an ExtendedCommitSig from this vote.
// Panics if either the vote extension signature is missing or if the block ID
// is not either empty or complete.
func (vote *Vote) ExtendedCommitSig() ExtendedCommitSig {
if vote == nil {
return NewExtendedCommitSigAbsent()
}
return ExtendedCommitSig{
CommitSig: vote.CommitSig(),
Extension: vote.Extension,
ExtensionSignature: vote.ExtensionSignature,
}
}
// VoteSignBytes returns the proto-encoding of the canonicalized Vote, for
// signing. Panics if the marshaling fails.
//
// The encoded Protobuf message is varint length-prefixed (using MarshalDelimited)
// for backwards-compatibility with the Amino encoding, due to e.g. hardware
// devices that rely on this encoding.
//
// See CanonicalizeVote
func VoteSignBytes(chainID string, vote *tmproto.Vote) []byte {
pb := CanonicalizeVote(chainID, vote)
bz, err := protoio.MarshalDelimited(&pb)
if err != nil {
panic(err)
}
return bz
}
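// Usage sketch (privKey is a hypothetical crypto.PrivKey; not part of this
// file): the canonical bytes are what a validator actually signs, e.g.
//
//	signBytes := VoteSignBytes(chainID, vote.ToProto())
//	sig, err := privKey.Sign(signBytes)
//	if err == nil {
//		vote.Signature = sig
//	}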
// VoteExtensionSignBytes returns the proto-encoding of the canonicalized vote
// extension for signing. Panics if the marshaling fails.
//
// Similar to VoteSignBytes, the encoded Protobuf message is varint
// length-prefixed for backwards-compatibility with the Amino encoding.
func VoteExtensionSignBytes(chainID string, vote *tmproto.Vote) []byte {
pb := CanonicalizeVoteExtension(chainID, vote)
bz, err := protoio.MarshalDelimited(&pb)
if err != nil {
panic(err)
}
return bz
}
func (vote *Vote) Copy() *Vote {
voteCopy := *vote
return &voteCopy
}
// String returns a string representation of Vote.
//
// 1. validator index
// 2. first 6 bytes of validator address
// 3. height
// 4. round,
// 5. type byte
// 6. type string
// 7. first 6 bytes of block hash
// 8. first 6 bytes of signature
// 9. first 6 bytes of vote extension
// 10. timestamp
func (vote *Vote) String() string {
if vote == nil {
return nilVoteStr
}
var typeString string
switch vote.Type {
case tmproto.PrevoteType:
typeString = "Prevote"
case tmproto.PrecommitType:
typeString = "Precommit"
default:
panic("Unknown vote type")
}
return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X @ %s}",
vote.ValidatorIndex,
tmbytes.Fingerprint(vote.ValidatorAddress),
vote.Height,
vote.Round,
vote.Type,
typeString,
tmbytes.Fingerprint(vote.BlockID.Hash),
tmbytes.Fingerprint(vote.Signature),
tmbytes.Fingerprint(vote.Extension),
CanonicalTime(vote.Timestamp),
)
}
func (vote *Vote) verifyAndReturnProto(chainID string, pubKey crypto.PubKey) (*tmproto.Vote, error) {
if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) {
return nil, ErrVoteInvalidValidatorAddress
}
v := vote.ToProto()
if !pubKey.VerifySignature(VoteSignBytes(chainID, v), vote.Signature) {
return nil, ErrVoteInvalidSignature
}
return v, nil
}
// Verify checks whether the signature associated with this vote corresponds to
// the given chain ID and public key. This function does not validate vote
// extension signatures - to do so, use VerifyWithExtension instead.
func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
_, err := vote.verifyAndReturnProto(chainID, pubKey)
return err
}
// VerifyVoteAndExtension performs the same verification as Verify, but
// additionally checks whether the vote extension signature corresponds to the
// given chain ID and public key. We only verify vote extension signatures for
// precommits.
func (vote *Vote) VerifyVoteAndExtension(chainID string, pubKey crypto.PubKey) error {
v, err := vote.verifyAndReturnProto(chainID, pubKey)
if err != nil {
return err
}
// We only verify vote extension signatures for non-nil precommits.
if vote.Type == tmproto.PrecommitType && !ProtoBlockIDIsNil(&v.BlockID) {
extSignBytes := VoteExtensionSignBytes(chainID, v)
if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
return ErrVoteInvalidSignature
}
}
return nil
}
// VerifyExtension checks whether the vote extension signature corresponds to the
// given chain ID and public key.
func (vote *Vote) VerifyExtension(chainID string, pubKey crypto.PubKey) error {
if vote.Type != tmproto.PrecommitType || vote.BlockID.IsNil() {
return nil
}
v := vote.ToProto()
extSignBytes := VoteExtensionSignBytes(chainID, v)
if !pubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) {
return ErrVoteInvalidSignature
}
return nil
}
// ValidateBasic checks whether the vote is well-formed. It does not, however,
// check vote extensions - for vote validation with vote extension validation,
// use ValidateWithExtension.
func (vote *Vote) ValidateBasic() error {
if !IsVoteTypeValid(vote.Type) {
return errors.New("invalid Type")
}
if vote.Height < 0 {
return errors.New("negative Height")
}
if vote.Round < 0 {
return errors.New("negative Round")
}
// NOTE: Timestamp validation is subtle and handled elsewhere.
if err := vote.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %w", err)
}
// BlockID.ValidateBasic would not err if we for instance have an empty hash but a
// non-empty PartsSetHeader:
if !vote.BlockID.IsNil() && !vote.BlockID.IsComplete() {
return fmt.Errorf("blockID must be either empty or complete, got: %v", vote.BlockID)
}
if len(vote.ValidatorAddress) != crypto.AddressSize {
return fmt.Errorf("expected ValidatorAddress size to be %d bytes, got %d bytes",
crypto.AddressSize,
len(vote.ValidatorAddress),
)
}
if vote.ValidatorIndex < 0 {
return errors.New("negative ValidatorIndex")
}
if len(vote.Signature) == 0 {
return errors.New("signature is missing")
}
if len(vote.Signature) > MaxSignatureSize {
return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize)
}
// We should only ever see vote extensions in non-nil precommits, otherwise
// this is a violation of the specification.
// https://github.com/tendermint/tendermint/issues/8487
if vote.Type != tmproto.PrecommitType || (vote.Type == tmproto.PrecommitType && vote.BlockID.IsNil()) |
if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() {
if len(vote.ExtensionSignature) > MaxSignatureSize {
return fmt.Errorf("vote extension signature is too big (max: %d)", MaxSignatureSize)
}
if len(vote.ExtensionSignature) == 0 && len(vote.Extension) != 0 {
return fmt.Errorf("vote extension signature absent on vote with extension")
}
}
return nil
}
// EnsureExtension checks for the presence of extensions signature data
// on precommit vote types.
func (vote *Vote) EnsureExtension() error {
// We should always see vote extension signatures in non-nil precommits
if vote.Type != tmproto.PrecommitType {
return nil
}
if vote.BlockID.IsNil() {
return nil
}
if len(vote.ExtensionSignature) > 0 {
return nil
}
return ErrVoteExtensionAbsent
}
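// ensureExtensionsPresent is an illustrative sketch (not part of the original
// file): EnsureExtension is a cheap presence check, not a signature check, so
// it is suited as a guard before persisting precommits that are expected to
// carry extensions.
func ensureExtensionsPresent(votes []*Vote) error {
	for _, v := range votes {
		if err := v.EnsureExtension(); err != nil {
			return err // ErrVoteExtensionAbsent
		}
	}
	return nil
}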
// ToProto converts the handwritten Vote type to the proto-generated type.
// It returns nil when the receiver is nil; the conversion itself cannot fail.
func (vote *Vote) ToProto() *tmproto.Vote {
if vote == nil {
return nil
}
return &tmproto.Vote{
Type: vote.Type,
Height: vote.Height,
Round: vote.Round,
BlockID: vote.BlockID.ToProto(),
Timestamp: vote.Timestamp,
ValidatorAddress: vote.ValidatorAddress,
ValidatorIndex: vote.ValidatorIndex,
Signature: vote.Signature,
Extension: vote.Extension,
ExtensionSignature: vote.ExtensionSignature,
}
}
func VotesToProto(votes []*Vote) []*tmproto.Vote {
if votes == nil {
return nil
}
res := make([]*tmproto.Vote, 0, len(votes))
for _, vote := range votes {
v := vote.ToProto()
// protobuf crashes when serializing "repeated" fields with nil elements
if v != nil {
res = append(res, v)
}
}
return res
}
gpib.py | import visa
import numpy as np
import logging
from datetime import datetime
resource_manager = visa.ResourceManager()
class Agilent33250A(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Agilent Technologies,33250A' in idn:
return instr
else:
            raise GPIBError('Agilent33250A function generator not in GPIB device list')
            # device not found; raise exception
def set_output(self, state):
"""Sets whether the function generator is outputting a voltage."""
if state:
self.instr.write('OUTP ON')
else:
self.instr.write('OUTP OFF')
def set_fm_ext(self, freq, amplitude, peak_freq_dev=None,
output_state=True):
"""Sets the func generator to frequency modulation with external modulation.
freq is the carrier frequency in Hz."""
if peak_freq_dev is None:
peak_freq_dev = freq
commands = ['FUNC SIN', # set to output sine functions
'FM:STAT ON',
'FREQ {0}'.format(freq),
'FM:SOUR EXT',
# 'FM:FREQ {0}'.format(freq),
'FM:DEV {0}'.format(peak_freq_dev),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0'] # set to frequency modulation
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# self.read_all_errors()
def set_burst(self, freq, amplitude, period, output_state=True):
"""Sets the func generator to burst mode with external trigerring."""
ncyc = int(period*freq)
commands = ['FUNC SIN',
'BURS:STAT ON',
'BURS:MODE TRIG', # external trigger
'TRIG:SOUR EXT',
'TRIG:SLOP POS',
'FREQ {0}'.format(freq),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0',
'BURS:NCYC {0}'.format(ncyc)]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# self.read_all_errors()
    def set_continuous(self, freq, amplitude, offset, output_state=True):
        """Programs the function generator to output a continuous sine wave."""
        commands = ['FUNC SIN',
'BURS:STAT OFF',
'SWE:STAT OFF',
'FM:STAT OFF',
'FREQ {0}'.format(freq),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS {0}'.format(offset),
]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# self.read_all_errors()
def set_freq_sweep(self, start_freq, stop_freq, sweep_time, amplitude,
output_state=True):
commands = ['FUNC SIN',
'TRIG:SOUR EXT',
'TRIG:SLOP POS',
'SWE:STAT ON',
'FREQ:STAR {0}'.format(start_freq),
'FREQ:STOP {0}'.format(stop_freq),
'SWE:TIME {0}'.format(sweep_time),
'VOLT {0}'.format(amplitude),
'VOLT:OFFS 0',
'SWE:STAT ON']
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_arbitrary(self, freq, low_volt, high_volt, output_state=True):
"""Programs the function generator to output the arbitrary waveform."""
commands = ['FUNC USER',
'BURS:STAT OFF',
'SWE:STAT OFF',
'FM:STAT OFF',
'FREQ {0}'.format(freq),
'VOLT:HIGH {0}'.format(high_volt),
'VOLT:LOW {0}'.format(low_volt),
]
if output_state is True:
commands.append('OUTP ON')
else:
commands.append('OUTP OFF')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('SYST:ERR?')
print(err)
if err[:2] == '+0':
done = True
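# Example usage (illustrative sketch, not part of the original module). The
# constructor opens the instrument itself, so a bare instantiation suffices;
# the frequency and levels below are placeholders:
#
#     fgen = Agilent33250A()
#     fgen.set_continuous(freq=1e6, amplitude=0.5, offset=0.0)  # 1 MHz sine
#     fgen.read_all_errors()  # drain the error queue after programming
#     fgen.set_output(False)  # disable the output when done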
class TektronixTDS1002(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'TEKTRONIX,TDS 1002' in idn:
return instr
else:
raise GPIBError('TektronicsTDS1002 oscilloscope not in GPIB device list')
            # device not found; raise exception
def get_data(self, channel=1):
hor_pos = float(self.instr.query('HOR:MAI:POS?'))
hor_scale = float(self.instr.query('HOR:MAI:SCA?'))
ch1_pos = float(self.instr.query('CH{0}:POS?'.format(channel)))
ch1_sca = float(self.instr.query('CH{0}:SCA?'.format(channel)))
commands = ['DATA:WIDTH 1',
'DATA:STAR 1',
'DATA:STOP 2500',
'DATA:SOU CH{0}'.format(channel),
'CURV?']
command_string = '\r\n'.join(commands)
self.instr.write(command_string)
# the first 6 bytes are #42500 and the last byte is \n
# ignore those
data = self.instr.read_raw()[6:-1]
data = np.fromstring(data, dtype=np.int8)
data_scaled = (np.array(data, dtype='float')*(10.0/2**8) - ch1_pos)*ch1_sca
time_array = np.arange(len(data_scaled), dtype='float')*10.0*hor_scale/len(data_scaled)
return time_array, data_scaled
def get_save_data(self, file_path, channel=1):
hor_pos = float(self.instr.query('HOR:MAI:POS?'))
hor_scale = float(self.instr.query('HOR:MAI:SCA?'))
ch1_pos = float(self.instr.query('CH{0}:POS?'.format(channel)))
ch1_sca = float(self.instr.query('CH{0}:SCA?'.format(channel)))
commands = ['DATA:WIDTH 1',
'DATA:STAR 1',
'DATA:STOP 2500',
'DATA:SOU CH{0}'.format(channel),
'CURV?']
command_string = '\r\n'.join(commands)
self.instr.write(command_string)
# the first 6 bytes are #42500 and the last byte is \n
# ignore those
data = self.instr.read_raw()[6:-1]
data = np.fromstring(data, dtype=np.int8)
data_scaled = (np.array(data, dtype='float')*(10.0/2**8) - ch1_pos)*ch1_sca
time_array = np.arange(len(data_scaled), dtype='float')*10.0*hor_scale/len(data_scaled)
np.savetxt(file_path + '\\' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.txt', (time_array, data_scaled), fmt='%1.4e')
#return time_array, data_scaled
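    # Example (illustrative sketch, assuming a scope is on the bus): grab
    # channel 1 and inspect the trace. get_data returns
    # (time_array, voltage_array) in seconds and volts as numpy arrays.
    #
    #     scope = TektronixTDS1002()
    #     t, v = scope.get_data(channel=1)
    #     print('peak-to-peak: {0:.3f} V'.format(v.max() - v.min()))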
class TektronixTDS2012C(TektronixTDS1002):
def __init__(self):
        # The base-class __init__ already calls the (overridden) open_instrument,
        # so opening the instrument here as well would connect twice.
        super(TektronixTDS2012C, self).__init__()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:3] == 'USB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'TEKTRONIX,TDS 2012C' in idn:
return instr
else:
raise GPIBError('TektronixTDS2012C oscilloscope not in USB device list')
            # device not found; raise exception
class NewportESP300(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'ESP300 Version' in idn:
return instr
else:
raise GPIBError('ESP300 Motion Controller not in GPIB device list')
            # device not found; raise exception
def read_position(self, num_axes=2):
        for i in range(num_axes):
pos = self.instr.query(str(i+1)+'TP?')
print('Pos' + str(i+1) + ' ' + pos[:8])
def move_absposition(self, abs_pos, axis):
self.instr.write(str(int(axis))+'PA'+str(np.around(abs_pos, decimals=3)))
print('Set Axis ' + str(axis) + ' to ' + str(np.around(abs_pos, decimals=3)))
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('TB?')
print(err)
if 'NO ERROR DETECTED' in err:
done = True
class AgilentN900A(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
IP = '192.168.0.109'
instr = resource_manager.get_instrument('TCPIP::' + IP + '::INSTR')
return instr
def get_n_save_marker_pos(self, file_path, channel=1):
self.instr.write(':CALC:MARK1:X?')
freq = np.float(self.instr.read())
self.instr.write(':CALC:MARK1:Y?')
amp = np.float(self.instr.read())
self.instr.write(':AVER:STAT OFF')
arr_write = np.array([freq, amp])
f_handle = open(file_path + '\\' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '.txt', 'ab')
np.savetxt(f_handle, arr_write.reshape(1, arr_write.shape[0]))
f_handle.close()
def trigger_marker_avg(self,num_avg=100,freq=6.83468,span=25,ref_lev=15):
commands = [':FREQ:CENT {0}'.format(freq) + ' GHz',
':FREQ:SPAN {0}'.format(span) + ' MHz',
':DISP:WIND:TRAC:Y:RLEV {0}'.format(ref_lev) + ' dBm',
':CALC:MARK:MODE POS',
':CALC:MARK:CPS ON',
':TRIG:SOUR EXT1',
':TRIG:EXT1:LEV 1.0V',
':AVER:STAT ON',
':AVER:COUNT {0}'.format(num_avg)
]
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
class SRSSG384(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:4] == 'GPIB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Stanford Research Systems,SG384' in idn:
return instr
else:
raise GPIBError('SRS SG384 function generator not in GPIB device list')
# device not found raise exception
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('LERR?')
print(err)
if err[:1] == '0':
done = True
def set_continuous(self, freq, amplitude, offset, output_state=True):
"""Programs the Stanford MW function generator to output a continuous sine wave.
External 'triggering' is accomplished using the MW switch."""
commands = ['MODL 0', #disable any modulation
'FREQ {0}'.format(freq)
]
if freq > 4.05e9:
commands.append('AMPH {0}'.format(amplitude)) #set rear RF doubler amplitude
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1') #enable output
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0}'.format(amplitude), 'OFSL {0}'.format(offset)]) #set front BNC amplitude
if output_state is True:
commands.append('ENBL 1') #enable output
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
# print(print_string)
# self.read_all_errors()
def set_continuous_Vpp(self, freq, amplitude, offset, output_state=True):
"""Programs the Stanford MW function generator to output a continuous sine wave.
External 'triggering' is accomplished using the MW switch."""
commands = ['MODL 0', #disable any modulation
'FREQ {0}'.format(freq)
]
if freq > 4.05e9:
commands.append('AMPH {0} VPP'.format(amplitude)) #set rear RF doubler amplitude
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1') #enable output
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0} VPP'.format(amplitude), 'OFSL {0}'.format(offset)]) #set front BNC amplitude
if output_state is True:
commands.append('ENBL 1') #enable output
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
def set_fm_ext(self, freq, amplitude, offset=0.0, peak_fm_deviation=None, output_state=True):
"""Sets the Stanford MW function generator to freq modulation with external modulation.
freq is the carrier frequency in Hz."""
if peak_fm_deviation is None:
peak_fm_deviation = freq
commands = ['TYPE 1', #set to FM
'MFNC 5', #external modulation
'FREQ {0}'.format(freq),
'FDEV {0}'.format(peak_fm_deviation),
'MODL 1' #enable modulation
]
if freq > 4.05e9:
commands.append('AMPH {0}'.format(amplitude)) #set rear RF doubler amplitude
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1') #enable output
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0}'.format(amplitude), 'OFSL {0}'.format(offset)]) #set front BNC amplitude
if output_state is True:
commands.append('ENBL 1') #enable output
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
#print(print_string)
#self.read_all_errors()
def set_freqsweep_ext(self, amplitude, sweep_low_end, sweep_high_end, offset=0.0, output_state=True):
"""Sets the Stanford MW function generator to freq modulation with external modulation.
freq is the carrier frequency in Hz."""
sweep_deviation = round(abs(sweep_low_end - sweep_high_end)/2.0,6)
freq = sweep_low_end + sweep_deviation
commands = ['TYPE 3', #set to sweep
'SFNC 5', #external modulation
'FREQ {0}'.format(freq),
'SDEV {0}'.format(sweep_deviation),
'MODL 1' #enable modulation
]
if freq > 4.05e9:
commands.append('AMPH {0}'.format(amplitude)) #set rear RF doubler amplitude
if offset > 0.0:
print('HIGH FREQUENCY OUTPUT IS AC ONLY')
if output_state is True:
commands.append('ENBH 1') #enable output
else:
commands.append('ENBH 0')
elif freq < 62.5e6:
commands.extend(['AMPL {0}'.format(amplitude), 'OFSL {0}'.format(offset)]) #set front BNC amplitude
if output_state is True:
commands.append('ENBL 1') #enable output
else:
commands.append('ENBL 0')
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
self.instr.write(command_string)
#print(print_string)
#self.read_all_errors()
def set_output(self, state):
"""Sets whether the function generator is outputting a voltage."""
freq = float(self.instr.query('FREQ?'))
if freq > 4.05e9:
if state:
self.instr.write('ENBH 1') #enable output
else:
self.instr.write('ENBH 0')
elif freq < 62.5e6:
if state:
self.instr.write('ENBL 1') #enable output
else:
self.instr.write('ENBL 0')
def trigger_ListMode(self):
"""Iterates the function generator to the next state in ListMode
NOTE: ListMode does not enable outputs, but only writes the function
generator state. Output must be enabled separately"""
self.instr.write('*TRG')
def disable_all(self, disable):
"""Disables all modulation and outputs of the Standford MW func. generator"""
commands = ['ENBH 0', #disable high freq. rear output
'ENBL 0', #disable low freq. front bnc
'MODL 0' #disable modulation
]
command_string = '\n'.join(commands)
print_string = '\n\t' + command_string.replace('\n', '\n\t')
logging.info(print_string)
if disable:
self.instr.write(command_string)
# self.read_all_errors()
# def set_MWinstr_freq_sweep(self, mod_type, freq, amplitude, mod_rate, mod_deviation, list_size=2, list_enable=True):
# """Sets the Stanford MW device to an instrument to be triggered later."""
# #create list of instrument states
# self.instr.query('LSTC? {0}'.format(list_size))
# for j in range(list_size):
# #enable to list for triggering
# cur_enable_state = self.instr.query('LSTE?')
# if cur_enable_state == False:
# self.instr.write('LSTE 1')
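# Example usage (illustrative sketch, not part of the original module); the
# frequencies and levels below are placeholders:
#
#     mw = SRSSG384()
#     mw.set_fm_ext(freq=6.834e9, amplitude=-10,
#                   peak_fm_deviation=1e5)  # external FM, 100 kHz deviation
#     mw.read_all_errors()
#     mw.disable_all(True)  # shut all outputs and modulation off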
class RigolDG1022Z(object):
def __init__(self):
self.instr = self.open_instrument()
def open_instrument(self):
resource_list = resource_manager.list_resources()
gpib_address_list = filter(lambda x: x[:3] == 'USB', resource_list)
for addr in gpib_address_list:
instr = resource_manager.open_resource(addr)
idn = instr.query('*IDN?')
if 'Rigol Technologies,DG1022Z,DG1ZA184750979' in idn:
return instr
else:
raise GPIBError('Rigol DG1022Z function generator not in USB device list')
            # device not found; raise exception
def set_output(self, state, channel=2):
"""Sets whether the function generator is outputting a voltage."""
if state:
self.instr.write(':OUTP{0} ON'.format(channel))
else:
self.instr.write(':OUTP{0} OFF'.format(channel))
def set_continuous(self, freq, amplitude, offset, phase, channel=2):
"""Programs the function generator to output a continuous sine wave."""
commands = [':SOUR{0}:APPL:SIN '.format(channel),
'{0},'.format(freq),
'{0},'.format(amplitude),
'{0},'.format(offset),
'{0}'.format(phase),
]
command_string = ''.join(commands)
logging.info(command_string)
self.instr.write(command_string)
def read_all_errors(self):
done = False
while not done:
err = self.instr.query('SYST:ERR?')
print(err)
if err[:2] == '+0':
done = True
class GPIBError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#globals
agilent_33250a = Agilent33250A()
tektronixTDS1002 = TektronixTDS1002()
# agilentN900A = AgilentN900A()
#tektronixTDS2012C = TektronixTDS2012C()
stanfordSG384 = SRSSG384()
# newportesp300 = NewportESP300()
rigolDG1022Z = RigolDG1022Z()
dependency_config.go | /*
Copyright 2017, 2018 Ankyra
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"fmt"
"strings"
"github.com/ankyra/escape-core/parsers"
"github.com/ankyra/escape-core/scopes"
)
/*
## Escape Plan
Dependencies are configured in the [`depends`](/docs/reference/escape-plan/#depends)
field of the Escape plan.
*/
type DependencyConfig struct {
// The release id is required and is resolved at *build* time and then
// persisted in the release metadata ensuring that deployments always use
// the same versions.
//
// Examples:
// - To always use the latest version: `my-organisation/my-dependency-latest`
// - To always use version 0.1.1: `my-organisation/my-dependency-v0.1.1`
// - To always use the latest version in the 0.1 series: `my-organisation/my-dependency-v0.1.@`
// - To make it possible to reference a dependency using a different name: `my-organisation/my-dependency-latest as my-name`
ReleaseId string `json:"release_id" yaml:"release_id"`
// Define the values of dependency inputs using Escape Script.
Mapping map[string]interface{} `json:"mapping" yaml:"mapping"`
// Define the values of dependency inputs using Escape Script when running
// stages in the build scope.
BuildMapping map[string]interface{} `json:"build_mapping" yaml:"build_mapping"`
// Define the values of dependency inputs using Escape Script when running
// stages in the deploy scope.
DeployMapping map[string]interface{} `json:"deploy_mapping" yaml:"deploy_mapping"`
// Map providers from the parent to dependencies.
//
// Example:
// ```
// consumes:
// - my-provider
// depends:
// - release_id: my-org/my-dep-latest
// consumes:
// provider: $my-provider.deployment
// ```
Consumes map[string]string `json:"consumes" yaml:"consumes"`
// The name of the (sub)-deployment. This defaults to the versionless release id;
// e.g. if the release_id is `my-org/my-dep-v1.0` then the DeploymentName will be
// `my-org/my-dep` by default.
DeploymentName string `json:"deployment_name" yaml:"deployment_name"`
// The variable used to reference this dependency. By default the variable
// name is the versionless release id of the dependency, but this can be
// overruled by renaming the dependency (e.g. `my-org/my-release-latest as
// my-variable`. This field will be set automatically at build time.
// Overwriting this field in the Escape plan has no effect.
VariableName string `json:"variable" yaml:"variable"`
// A list of scopes (`build`, `deploy`) that defines during which stage(s)
// this dependency should be fetched and deployed. *Currently not implemented!*
Scopes scopes.Scopes `json:"scopes" yaml:"scopes"`
// Parsed out of the release ID. For example: when release id is
// `"my-org/my-name-v1.0"` this value is `"my-org"`.
Project string `json:"-" yaml:"-"`
// Parsed out of the release ID. For example: when release id is
// `"my-org/my-name-v1.0"` this value is `"my-name"`.
Name string `json:"-" yaml:"-"`
// Parsed out of the release ID. For example: when release id is
// `"my-org/my-name-v1.0"` this value is `"1.0"`.
Version string `json:"-" yaml:"-"`
// Parsed out of the release ID. For example: when release id is
// `"my-org/my-name:tag"` this value is `"tag"`.
Tag string `json:"-" yaml:"-"`
}
type ResolvedDependencyConfig struct {
*DependencyConfig
ReleaseMetadata *ReleaseMetadata
}
func NewDependencyConfig(releaseId string) *DependencyConfig {
return &DependencyConfig{
ReleaseId: releaseId,
Mapping: map[string]interface{}{},
BuildMapping: map[string]interface{}{},
DeployMapping: map[string]interface{}{},
Scopes: scopes.AllScopes,
Consumes: map[string]string{},
}
}
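// exampleParseDependency is an illustrative sketch (not part of the original
// file): the parse step that a consumer would normally trigger through
// Validate. The release id syntax follows the examples documented on
// DependencyConfig above.
func exampleParseDependency() error {
	cfg := NewDependencyConfig("my-organisation/my-dependency-v1.0 as my-name")
	if err := cfg.EnsureConfigIsParsed(); err != nil {
		return err
	}
	// After parsing: cfg.Project == "my-organisation", cfg.Name == "my-dependency",
	// cfg.Version == "1.0", and cfg.VariableName == "my-name".
	return nil
}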
func (d *DependencyConfig) Resolve(m *ReleaseMetadata) *ResolvedDependencyConfig {
return &ResolvedDependencyConfig{
DependencyConfig: d,
ReleaseMetadata: m,
}
}
func DependencyNeedsResolvingError(dependencyReleaseId string) error {
return fmt.Errorf("The dependency '%s' needs its version resolved.", dependencyReleaseId)
}
func (d *DependencyConfig) Copy() *DependencyConfig {
result := NewDependencyConfig(d.ReleaseId)
for k, v := range d.Mapping {
result.Mapping[k] = v
}
for k, v := range d.BuildMapping {
result.BuildMapping[k] = v
}
for k, v := range d.DeployMapping {
result.DeployMapping[k] = v
}
for k, v := range d.Consumes {
result.Consumes[k] = v
	}
	result.DeploymentName = d.DeploymentName
	result.VariableName = d.VariableName
result.Scopes = d.Scopes.Copy()
result.Project = d.Project
result.Name = d.Name
result.Version = d.Version
result.Tag = d.Tag
return result
}
func (d *DependencyConfig) EnsureConfigIsParsed() error {
parsed, err := parsers.ParseDependency(d.ReleaseId)
if err != nil {
return err
}
d.ReleaseId = parsed.QualifiedReleaseId.ToString()
d.Project = parsed.Project
d.Name = parsed.Name
d.Version = parsed.Version
d.Tag = parsed.Tag
if d.VariableName == "" {
d.VariableName = parsed.VariableName
}
return nil
}
func (d *DependencyConfig) NeedsResolving() bool {
return d.Tag != "" || d.Version == "latest" || strings.HasSuffix(d.Version, ".@")
}
func (d *DependencyConfig) GetVersionAsString() (version string) {
if d.Tag != "" {
return d.Tag
}
version = "v" + d.Version
if d.Version == "latest" {
version = d.Version
}
return version
}
func (d *DependencyConfig) Validate(m *ReleaseMetadata) error {
if d.BuildMapping == nil {
d.BuildMapping = map[string]interface{}{}
}
if d.DeployMapping == nil {
d.DeployMapping = map[string]interface{}{}
}
if d.Scopes == nil || len(d.Scopes) == 0 {
d.Scopes = scopes.AllScopes
}
if err := d.EnsureConfigIsParsed(); err != nil {
return err
}
if d.NeedsResolving() {
return DependencyNeedsResolvingError(d.ReleaseId)
}
d.DeploymentName = d.VariableName
if d.DeploymentName == "" {
d.DeploymentName = d.Project + "/" + d.Name
}
if d.VariableName == "" {
d.VariableName = d.Project + "/" + d.Name
}
return nil
}
func (d *DependencyConfig) GetMapping(scope string) map[string]interface{} {
if scope == "build" {
return d.BuildMapping
}
if scope == "deploy" {
return d.DeployMapping
}
return nil
}
func (d *DependencyConfig) AddVariableMapping(scopes []string, id, key string) {
for _, scope := range scopes {
mapping := d.GetMapping(scope)
if mapping != nil {
_, found := mapping[id]
if !found {
mapping[id] = key
}
}
}
}
func (d *DependencyConfig) InScope(scope string) bool {
return d.Scopes.InScope(scope)
}
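// exampleAddVariableMapping is an illustrative sketch (not part of the
// original file): AddVariableMapping only fills a key that is not already
// present, so an existing mapping wins over a later default. The key strings
// here are placeholders, not real Escape Script.
func exampleAddVariableMapping(d *DependencyConfig) {
	d.AddVariableMapping([]string{"build", "deploy"}, "input_variable", "$this.inputs.input_variable")
	// A second call for the same id leaves the first mapping untouched.
	d.AddVariableMapping([]string{"build"}, "input_variable", "$other.value")
}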
func ExpectingTypeForDependencyFieldError(typ, field string, val interface{}) error {
return fmt.Errorf("Expecting %s for dependency '%s'; got '%T'", typ, field, val)
}
func ExpectingStringKeyInMapError(field string, val interface{}) error {
return fmt.Errorf("Expecting string key in dependency '%s'; got '%T'", field, val)
}
func stringFromInterface(field string, val interface{}) (string, error) {
valString, ok := val.(string)
if !ok {
return "", ExpectingTypeForDependencyFieldError("string", field, val)
}
return valString, nil
}
func mapFromInterface(field string, val interface{}) (map[string]interface{}, error) {
valMap, ok := val.(map[interface{}]interface{})
if !ok {
return nil, ExpectingTypeForDependencyFieldError("dict", field, val)
}
result := map[string]interface{}{}
for k, v := range valMap {
kStr, ok := k.(string)
if !ok {
return nil, ExpectingStringKeyInMapError(field, k)
}
result[kStr] = v
}
return result, nil
}
func NewDependencyConfigFromMap(dep map[interface{}]interface{}) (*DependencyConfig, error) {
var releaseId, deploymentName, variable string
buildMapping := map[string]interface{}{}
deployMapping := map[string]interface{}{}
consumes := map[string]string{}
depScopes := []string{}
for key, val := range dep {
var err error
keyStr, ok := key.(string)
if !ok {
return nil, fmt.Errorf("Expecting string key in dependency")
}
if keyStr == "release_id" {
releaseId, err = stringFromInterface("release_id", val)
if err != nil {
return nil, err
}
} else if keyStr == "deployment_name" {
deploymentName, err = stringFromInterface("deployment_name", val)
if err != nil {
return nil, err
}
} else if keyStr == "variable" {
variable, err = stringFromInterface("variable", val)
if err != nil {
return nil, err
}
} else if key == "mapping" { // backwards compatibility with release metadata <= 6
valMap, err := mapFromInterface("mapping", val)
if err != nil {
return nil, err
}
for k, v := range valMap {
buildMapping[k] = v
deployMapping[k] = v
}
} else if key == "build_mapping" {
valMap, err := mapFromInterface("build_mapping", val)
if err != nil {
return nil, err
}
for k, v := range valMap {
buildMapping[k] = v
}
} else if key == "deploy_mapping" {
valMap, err := mapFromInterface("deploy_mapping", val)
if err != nil {
return nil, err
}
for k, v := range valMap {
deployMapping[k] = v
}
} else if key == "consumes" {
valMap, err := mapFromInterface("consumes", val)
if err != nil {
return nil, err
}
for k, v := range valMap {
vStr, ok := v.(string)
if !ok {
return nil, fmt.Errorf("Expecting string value in dependency consumer mapping")
}
consumes[k] = vStr
}
} else if key == "scopes" {
s, err := scopes.NewScopesFromInterface(val)
if err != nil {
return nil, err
}
depScopes = s
}
}
if releaseId == "" {
return nil, fmt.Errorf("Missing 'release_id' in dependency")
}
cfg := NewDependencyConfig(releaseId)
cfg.DeploymentName = deploymentName
cfg.VariableName = variable
cfg.BuildMapping = buildMapping
cfg.DeployMapping = deployMapping
cfg.Scopes = depScopes
cfg.Consumes = consumes
return cfg, nil
}
view.rs | use serde::{Serialize, Deserialize};
use crate::Node;
use crate::router::Params;
pub type MetaTags = Vec<(String, String)>;
pub type Styles = Vec<String>;
pub type Body = Vec<Node>;
pub trait View
where
Self: Sized {
type Params: Params + 'static;
type Query: Params + 'static;
fn loader(
path: &str,
params: Self::Params,
query: Self::Query,
) -> Option<Self>;
fn title(&self) -> String;
fn meta(&self) -> MetaTags {
vec![]
}
fn styles(&self) -> Styles;
fn body(&self, nested_view: Option<Node>) -> Body;
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RenderedView {
pub title: String,
pub meta: MetaTags,
pub styles: Styles,
pub body: Node
}
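// Illustrative sketch (not part of the original file): what a concrete view
// could look like. It assumes `()` implements `Params` and that `Node` has a
// `text` constructor -- both are hypothetical, so the sketch is kept commented.
//
//     struct HomePage;
//
//     impl View for HomePage {
//         type Params = ();
//         type Query = ();
//         fn loader(_path: &str, _params: (), _query: ()) -> Option<Self> {
//             Some(HomePage)
//         }
//         fn title(&self) -> String { "Home".into() }
//         fn styles(&self) -> Styles { vec![] }
//         fn body(&self, _nested_view: Option<Node>) -> Body {
//             vec![Node::text("Hello")]
//         }
//     }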
manageddatabasesensitivitylabels.go | package sql
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ManagedDatabaseSensitivityLabelsClient is the client for the Azure SQL Database
// management API, a RESTful set of web services that interact with Azure SQL
// Database services to manage your databases. The API enables you to create,
// retrieve, update, and delete databases.
type ManagedDatabaseSensitivityLabelsClient struct {
BaseClient
}
// NewManagedDatabaseSensitivityLabelsClient creates an instance of the ManagedDatabaseSensitivityLabelsClient client.
func NewManagedDatabaseSensitivityLabelsClient(subscriptionID string) ManagedDatabaseSensitivityLabelsClient {
return NewManagedDatabaseSensitivityLabelsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewManagedDatabaseSensitivityLabelsClientWithBaseURI creates an instance of the
// ManagedDatabaseSensitivityLabelsClient client using a custom endpoint. Use this when interacting with an Azure
// cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewManagedDatabaseSensitivityLabelsClientWithBaseURI(baseURI string, subscriptionID string) ManagedDatabaseSensitivityLabelsClient {
return ManagedDatabaseSensitivityLabelsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates the sensitivity label of a given column
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// schemaName - the name of the schema.
// tableName - the name of the table.
// columnName - the name of the column.
// parameters - the column sensitivity label resource.
func (client ManagedDatabaseSensitivityLabelsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string, parameters SensitivityLabel) (result SensitivityLabel, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, managedInstanceName, databaseName, schemaName, tableName, columnName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "CreateOrUpdate", resp, "Failure responding to request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ManagedDatabaseSensitivityLabelsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string, parameters SensitivityLabel) (*http.Request, error) {
pathParameters := map[string]interface{}{
"columnName": autorest.Encode("path", columnName),
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"schemaName": autorest.Encode("path", schemaName),
"sensitivityLabelSource": autorest.Encode("path", "current"),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
parameters.ManagedBy = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) CreateOrUpdateResponder(resp *http.Response) (result SensitivityLabel, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
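// exampleCreateOrUpdateLabel is an illustrative sketch (not part of the
// generated file): tagging a column with a sensitivity label. All resource
// names below are placeholders.
func exampleCreateOrUpdateLabel(ctx context.Context, client ManagedDatabaseSensitivityLabelsClient, label SensitivityLabel) error {
	// The label is written to the "current" sensitivity label source.
	_, err := client.CreateOrUpdate(ctx, "my-resource-group", "my-managed-instance",
		"my-database", "dbo", "Customers", "Email", label)
	return err
}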
// Delete deletes the sensitivity label of a given column
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// schemaName - the name of the schema.
// tableName - the name of the table.
// columnName - the name of the column.
func (client ManagedDatabaseSensitivityLabelsClient) Delete(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.Delete")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, managedInstanceName, databaseName, schemaName, tableName, columnName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Delete", resp, "Failure responding to request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ManagedDatabaseSensitivityLabelsClient) DeletePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"columnName": autorest.Encode("path", columnName),
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"schemaName": autorest.Encode("path", schemaName),
"sensitivityLabelSource": autorest.Encode("path", "current"),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) DeleteSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// DisableRecommendation disables sensitivity recommendations on a given column
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// schemaName - the name of the schema.
// tableName - the name of the table.
// columnName - the name of the column.
func (client ManagedDatabaseSensitivityLabelsClient) DisableRecommendation(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.DisableRecommendation")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DisableRecommendationPreparer(ctx, resourceGroupName, managedInstanceName, databaseName, schemaName, tableName, columnName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "DisableRecommendation", nil, "Failure preparing request")
return
}
resp, err := client.DisableRecommendationSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "DisableRecommendation", resp, "Failure sending request")
return
}
result, err = client.DisableRecommendationResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "DisableRecommendation", resp, "Failure responding to request")
return
}
return
}
// DisableRecommendationPreparer prepares the DisableRecommendation request.
func (client ManagedDatabaseSensitivityLabelsClient) DisableRecommendationPreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"columnName": autorest.Encode("path", columnName),
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"schemaName": autorest.Encode("path", schemaName),
"sensitivityLabelSource": autorest.Encode("path", "recommended"),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}/disable", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DisableRecommendationSender sends the DisableRecommendation request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) DisableRecommendationSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DisableRecommendationResponder handles the response to the DisableRecommendation request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) DisableRecommendationResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// EnableRecommendation enables sensitivity recommendations on a given column (recommendations are enabled by default
// on all columns)
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// schemaName - the name of the schema.
// tableName - the name of the table.
// columnName - the name of the column.
func (client ManagedDatabaseSensitivityLabelsClient) EnableRecommendation(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.EnableRecommendation")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.EnableRecommendationPreparer(ctx, resourceGroupName, managedInstanceName, databaseName, schemaName, tableName, columnName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "EnableRecommendation", nil, "Failure preparing request")
return
}
resp, err := client.EnableRecommendationSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "EnableRecommendation", resp, "Failure sending request")
return
}
result, err = client.EnableRecommendationResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "EnableRecommendation", resp, "Failure responding to request")
return
}
return
}
// EnableRecommendationPreparer prepares the EnableRecommendation request.
func (client ManagedDatabaseSensitivityLabelsClient) EnableRecommendationPreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"columnName": autorest.Encode("path", columnName),
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"schemaName": autorest.Encode("path", schemaName),
"sensitivityLabelSource": autorest.Encode("path", "recommended"),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}/enable", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// EnableRecommendationSender sends the EnableRecommendation request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) EnableRecommendationSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// EnableRecommendationResponder handles the response to the EnableRecommendation request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) EnableRecommendationResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the sensitivity label of a given column
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// schemaName - the name of the schema.
// tableName - the name of the table.
// columnName - the name of the column.
// sensitivityLabelSource - the source of the sensitivity label.
func (client ManagedDatabaseSensitivityLabelsClient) Get(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string, sensitivityLabelSource SensitivityLabelSource) (result SensitivityLabel, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, managedInstanceName, databaseName, schemaName, tableName, columnName, sensitivityLabelSource)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client ManagedDatabaseSensitivityLabelsClient) GetPreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, schemaName string, tableName string, columnName string, sensitivityLabelSource SensitivityLabelSource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"columnName": autorest.Encode("path", columnName),
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"schemaName": autorest.Encode("path", schemaName),
"sensitivityLabelSource": autorest.Encode("path", sensitivityLabelSource),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"tableName": autorest.Encode("path", tableName),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) GetResponder(resp *http.Response) (result SensitivityLabel, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListCurrentByDatabase gets the sensitivity labels of a given database
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// filter - an OData filter expression that filters elements in the collection.
func (client ManagedDatabaseSensitivityLabelsClient) ListCurrentByDatabase(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, skipToken string, count *bool, filter string) (result SensitivityLabelListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.ListCurrentByDatabase")
defer func() {
sc := -1
if result.sllr.Response.Response != nil {
sc = result.sllr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listCurrentByDatabaseNextResults
req, err := client.ListCurrentByDatabasePreparer(ctx, resourceGroupName, managedInstanceName, databaseName, skipToken, count, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "ListCurrentByDatabase", nil, "Failure preparing request")
return
}
resp, err := client.ListCurrentByDatabaseSender(req)
if err != nil {
result.sllr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "ListCurrentByDatabase", resp, "Failure sending request")
return
}
result.sllr, err = client.ListCurrentByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "ListCurrentByDatabase", resp, "Failure responding to request")
return
}
if result.sllr.hasNextLink() && result.sllr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListCurrentByDatabasePreparer prepares the ListCurrentByDatabase request.
func (client ManagedDatabaseSensitivityLabelsClient) ListCurrentByDatabasePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, skipToken string, count *bool, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(skipToken) > 0 {
queryParameters["$skipToken"] = autorest.Encode("query", skipToken)
}
if count != nil {
queryParameters["$count"] = autorest.Encode("query", *count)
}
if len(filter) > 0 {
queryParameters["$filter"] = autorest.Encode("query", filter)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/currentSensitivityLabels", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListCurrentByDatabaseSender sends the ListCurrentByDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) ListCurrentByDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListCurrentByDatabaseResponder handles the response to the ListCurrentByDatabase request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) ListCurrentByDatabaseResponder(resp *http.Response) (result SensitivityLabelListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listCurrentByDatabaseNextResults retrieves the next set of results, if any.
func (client ManagedDatabaseSensitivityLabelsClient) listCurrentByDatabaseNextResults(ctx context.Context, lastResults SensitivityLabelListResult) (result SensitivityLabelListResult, err error) {
req, err := lastResults.sensitivityLabelListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "listCurrentByDatabaseNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListCurrentByDatabaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "listCurrentByDatabaseNextResults", resp, "Failure sending next results request")
}
result, err = client.ListCurrentByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "listCurrentByDatabaseNextResults", resp, "Failure responding to next results request")
}
return
}
// ListCurrentByDatabaseComplete enumerates all values, automatically crossing page boundaries as required.
func (client ManagedDatabaseSensitivityLabelsClient) ListCurrentByDatabaseComplete(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, skipToken string, count *bool, filter string) (result SensitivityLabelListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.ListCurrentByDatabase")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListCurrentByDatabase(ctx, resourceGroupName, managedInstanceName, databaseName, skipToken, count, filter)
return
}
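// exampleListCurrentLabels is an illustrative sketch (not part of the
// generated file): walking every current sensitivity label with the Complete
// iterator, which crosses page boundaries automatically. Resource names are
// placeholders; the iterator methods are the standard autorest ones.
func exampleListCurrentLabels(ctx context.Context, client ManagedDatabaseSensitivityLabelsClient) error {
	it, err := client.ListCurrentByDatabaseComplete(ctx, "my-resource-group",
		"my-managed-instance", "my-database", "", nil, "")
	if err != nil {
		return err
	}
	for it.NotDone() {
		_ = it.Value() // one SensitivityLabel per iteration
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}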
// ListRecommendedByDatabase gets the sensitivity labels of a given database
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
// includeDisabledRecommendations - specifies whether to include disabled recommendations or not.
// filter - an OData filter expression that filters elements in the collection.
func (client ManagedDatabaseSensitivityLabelsClient) ListRecommendedByDatabase(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, skipToken string, includeDisabledRecommendations *bool, filter string) (result SensitivityLabelListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.ListRecommendedByDatabase")
defer func() {
sc := -1
if result.sllr.Response.Response != nil {
sc = result.sllr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listRecommendedByDatabaseNextResults
req, err := client.ListRecommendedByDatabasePreparer(ctx, resourceGroupName, managedInstanceName, databaseName, skipToken, includeDisabledRecommendations, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "ListRecommendedByDatabase", nil, "Failure preparing request")
return
}
resp, err := client.ListRecommendedByDatabaseSender(req)
if err != nil {
result.sllr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "ListRecommendedByDatabase", resp, "Failure sending request")
return
}
result.sllr, err = client.ListRecommendedByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "ListRecommendedByDatabase", resp, "Failure responding to request")
return
}
if result.sllr.hasNextLink() && result.sllr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListRecommendedByDatabasePreparer prepares the ListRecommendedByDatabase request.
func (client ManagedDatabaseSensitivityLabelsClient) ListRecommendedByDatabasePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, skipToken string, includeDisabledRecommendations *bool, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(skipToken) > 0 {
queryParameters["$skipToken"] = autorest.Encode("query", skipToken)
}
if includeDisabledRecommendations != nil {
queryParameters["includeDisabledRecommendations"] = autorest.Encode("query", *includeDisabledRecommendations)
}
if len(filter) > 0 {
queryParameters["$filter"] = autorest.Encode("query", filter)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/recommendedSensitivityLabels", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListRecommendedByDatabaseSender sends the ListRecommendedByDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) ListRecommendedByDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListRecommendedByDatabaseResponder handles the response to the ListRecommendedByDatabase request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) ListRecommendedByDatabaseResponder(resp *http.Response) (result SensitivityLabelListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listRecommendedByDatabaseNextResults retrieves the next set of results, if any.
func (client ManagedDatabaseSensitivityLabelsClient) listRecommendedByDatabaseNextResults(ctx context.Context, lastResults SensitivityLabelListResult) (result SensitivityLabelListResult, err error) {
req, err := lastResults.sensitivityLabelListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "listRecommendedByDatabaseNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListRecommendedByDatabaseSender(req)
if err != nil |
result, err = client.ListRecommendedByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "listRecommendedByDatabaseNextResults", resp, "Failure responding to next results request")
}
return
}
// ListRecommendedByDatabaseComplete enumerates all values, automatically crossing page boundaries as required.
func (client ManagedDatabaseSensitivityLabelsClient) ListRecommendedByDatabaseComplete(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, skipToken string, includeDisabledRecommendations *bool, filter string) (result SensitivityLabelListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.ListRecommendedByDatabase")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListRecommendedByDatabase(ctx, resourceGroupName, managedInstanceName, databaseName, skipToken, includeDisabledRecommendations, filter)
return
}
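// A minimal usage sketch, not part of the generated SDK (client, ctx and the
// resource names below are assumed placeholders): the Complete variant returns
// an iterator that transparently follows the nextLink across pages.
//
//	it, err := client.ListRecommendedByDatabaseComplete(ctx, "rg", "mi", "db", "", nil, "")
//	for err == nil && it.NotDone() {
//		_ = it.Value() // one SensitivityLabel per step
//		err = it.NextWithContext(ctx)
//	}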
// Update updates the sensitivity labels of a given database using an operations batch.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database.
func (client ManagedDatabaseSensitivityLabelsClient) Update(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, parameters SensitivityLabelUpdateList) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseSensitivityLabelsClient.Update")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, managedInstanceName, databaseName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Update", nil, "Failure preparing request")
return
}
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Update", resp, "Failure sending request")
return
}
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "Update", resp, "Failure responding to request")
return
}
return
}
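// Hedged usage sketch (how the batch is built is an assumption; see the
// SensitivityLabelUpdateList model elsewhere in this package for its exact
// shape):
//
//	resp, err := client.Update(ctx, "rg", "mi", "db", parameters)
//	// a nil err with resp.StatusCode == http.StatusOK means the batch applied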
// UpdatePreparer prepares the Update request.
func (client ManagedDatabaseSensitivityLabelsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, parameters SensitivityLabelUpdateList) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2020-11-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/currentSensitivityLabels", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseSensitivityLabelsClient) UpdateSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseSensitivityLabelsClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
| {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "sql.ManagedDatabaseSensitivityLabelsClient", "listRecommendedByDatabaseNextResults", resp, "Failure sending next results request")
} |
context.go | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sessionctx
import (
"context"
"fmt"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/owner"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util"
"github.com/pingcap/tidb/util/kvcache"
"github.com/pingcap/tidb/util/sli"
"github.com/pingcap/tidb/util/topsql/stmtstats"
"github.com/pingcap/tipb/go-binlog"
"github.com/tikv/client-go/v2/oracle"
)
// InfoschemaMetaVersion is a workaround. Due to a circular dependency, we
// cannot return the complete interface here. But SchemaMetaVersion is widely
// used for logging, so we provide this convenience.
// FIXME: remove this interface
type InfoschemaMetaVersion interface {
SchemaMetaVersion() int64
}
// Context is an interface for transaction and executive args environment.
type Context interface {
// NewTxn creates a new transaction for further execution.
// If the old transaction is valid, it is committed first.
// It's used in BEGIN statements and DDL statements to commit the old transaction.
NewTxn(context.Context) error
// NewStaleTxnWithStartTS initializes a staleness transaction with the given StartTS.
NewStaleTxnWithStartTS(ctx context.Context, startTS uint64) error
// Txn returns the current transaction which is created before executing a statement.
// The returned kv.Transaction is not nil, but it may be pending or invalid.
// If the active parameter is true, calling this function will wait for the pending txn
// to become valid.
Txn(active bool) (kv.Transaction, error)
// GetClient gets a kv.Client.
GetClient() kv.Client
// GetMPPClient gets a kv.MPPClient. | SetValue(key fmt.Stringer, value interface{})
// Value returns the value associated with this context for key.
Value(key fmt.Stringer) interface{}
// ClearValue clears the value associated with this context for key.
ClearValue(key fmt.Stringer)
// Deprecated: Use TxnManager.GetTxnInfoSchema to get the current schema in session
GetInfoSchema() InfoschemaMetaVersion
GetSessionVars() *variable.SessionVars
GetSessionManager() util.SessionManager
// RefreshTxnCtx commits old transaction without retry,
// and creates a new transaction.
// Currently it is only used for load data and batch insert.
RefreshTxnCtx(context.Context) error
// RefreshVars refreshes modified global variables into the current session.
// It is only used by daemon sessions like `statsHandle` to detect global variable changes.
RefreshVars(context.Context) error
// InitTxnWithStartTS initializes a transaction with startTS.
// It should be called right before we build an executor.
InitTxnWithStartTS(startTS uint64) error
// GetSnapshotWithTS returns a snapshot with start ts
GetSnapshotWithTS(ts uint64) kv.Snapshot
// GetStore returns the store of session.
GetStore() kv.Storage
// PreparedPlanCache returns the cache of the physical plan
PreparedPlanCache() *kvcache.SimpleLRUCache
// StoreQueryFeedback stores the query feedback.
StoreQueryFeedback(feedback interface{})
// UpdateColStatsUsage updates the column stats usage.
// TODO: maybe we can use a method called GetSessionStatsCollector to replace both StoreQueryFeedback and UpdateColStatsUsage but we need to deal with the import cycle if we do so.
UpdateColStatsUsage(predicateColumns []model.TableColumnID)
// HasDirtyContent checks whether there's dirty update on the given table.
HasDirtyContent(tid int64) bool
// StmtCommit flushes all changes made by the statement to the underlying transaction.
StmtCommit()
// StmtRollback provides statement level rollback.
StmtRollback()
// StmtGetMutation gets the binlog mutation for current statement.
StmtGetMutation(int64) *binlog.TableMutation
// DDLOwnerChecker returns owner.DDLOwnerChecker.
DDLOwnerChecker() owner.DDLOwnerChecker
// AddTableLock adds table lock to the session lock map.
AddTableLock([]model.TableLockTpInfo)
// ReleaseTableLocks releases table locks in the session lock map.
ReleaseTableLocks(locks []model.TableLockTpInfo)
// ReleaseTableLockByTableIDs releases table locks in the session lock map by table IDs.
ReleaseTableLockByTableIDs(tableIDs []int64)
// CheckTableLocked checks the table lock.
CheckTableLocked(tblID int64) (bool, model.TableLockType)
// GetAllTableLocks gets the table IDs and DB IDs of all table locks held by the session.
GetAllTableLocks() []model.TableLockTpInfo
// ReleaseAllTableLocks releases all table locks held by the session.
ReleaseAllTableLocks()
// HasLockedTables checks whether this session has locked any tables.
HasLockedTables() bool
// PrepareTSFuture prepares the timestamp via a future.
PrepareTSFuture(ctx context.Context)
// StoreIndexUsage stores the index usage information.
StoreIndexUsage(tblID int64, idxID int64, rowsSelected int64)
// GetTxnWriteThroughputSLI returns the TxnWriteThroughputSLI.
GetTxnWriteThroughputSLI() *sli.TxnWriteThroughputSLI
// GetBuiltinFunctionUsage returns the BuiltinFunctionUsage of current Context, which is not thread safe.
// Use primitive map type to prevent circular import. Should convert it to telemetry.BuiltinFunctionUsage before using.
GetBuiltinFunctionUsage() map[string]uint32
// BuiltinFunctionUsageInc increases the usage count of the given builtin function.
// Notice that this function is thread safe.
BuiltinFunctionUsageInc(scalarFuncSigName string)
// GetStmtStats returns stmtstats.StatementStats owned by implementation.
GetStmtStats() *stmtstats.StatementStats
// ShowProcess returns ProcessInfo running in current Context
ShowProcess() *util.ProcessInfo
}
type basicCtxType int
func (t basicCtxType) String() string {
switch t {
case QueryString:
return "query_string"
case Initing:
return "initing"
case LastExecuteDDL:
return "last_execute_ddl"
}
return "unknown"
}
// Context keys.
const (
// QueryString is the key for original query string.
QueryString basicCtxType = 1
// Initing is the key for indicating if the server is running bootstrap or upgrade job.
Initing basicCtxType = 2
// LastExecuteDDL is the key for whether the session execute a ddl command last time.
LastExecuteDDL basicCtxType = 3
)
// ValidateSnapshotReadTS strictly validates that readTS does not exceed the PD timestamp
func ValidateSnapshotReadTS(ctx context.Context, sctx Context, readTS uint64) error {
latestTS, err := sctx.GetStore().GetOracle().GetLowResolutionTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
// If we fail to get latestTS or the readTS exceeds it, get a timestamp from PD to double check
if err != nil || readTS > latestTS {
metrics.ValidateReadTSFromPDCount.Inc()
currentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope)
if err != nil {
return errors.Errorf("fail to validate read timestamp: %v", err)
}
if readTS > currentVer.Ver {
return errors.Errorf("cannot set read timestamp to a future time")
}
}
return nil
}
// allowedTimeFromNow is the farthest into the future that ValidateStaleReadTS allows readTS to be
const allowedTimeFromNow = 100 * time.Millisecond
// ValidateStaleReadTS loosely validates that readTS does not exceed the current time.
func ValidateStaleReadTS(ctx context.Context, sctx Context, readTS uint64) error {
currentTS, err := sctx.GetStore().GetOracle().GetStaleTimestamp(ctx, oracle.GlobalTxnScope, 0)
// If we fail to calculate currentTS from local time, fallback to get a timestamp from PD
if err != nil {
metrics.ValidateReadTSFromPDCount.Inc()
currentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope)
if err != nil {
return errors.Errorf("fail to validate read timestamp: %v", err)
}
currentTS = currentVer.Ver
}
if oracle.GetTimeFromTS(readTS).After(oracle.GetTimeFromTS(currentTS).Add(allowedTimeFromNow)) {
return errors.Errorf("cannot set read timestamp to a future time")
}
return nil
}
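// Hedged illustration of the check above: with allowedTimeFromNow = 100ms, a
// readTS whose wall-clock time is 50ms ahead of the calculated currentTS
// passes, while one 200ms ahead fails with "cannot set read timestamp to a
// future time".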
// SysProcTracker is used to track background sys processes
type SysProcTracker interface {
Track(id uint64, proc Context) error
UnTrack(id uint64)
GetSysProcessList() map[uint64]*util.ProcessInfo
KillSysProcess(id uint64)
} | GetMPPClient() kv.MPPClient
// SetValue saves a value associated with this context for key. |
send_cancel.rs | use quote::{format_ident, quote};
use syn::parse::{Parse, ParseStream};
use syn::{Result, Token};
#[derive(Debug)]
pub struct SendCancelMacroInput {
func_name: syn::Ident,
name: syn::Ident,
meshedchannels_name: syn::Ident,
nsessions: u64,
msg: syn::Expr,
}
impl Parse for SendCancelMacroInput {
fn parse(input: ParseStream) -> Result<Self> {
let func_name = syn::Ident::parse(input)?;
<Token![,]>::parse(input)?;
let name = syn::Ident::parse(input)?;
<Token![,]>::parse(input)?;
let meshedchannels_name = syn::Ident::parse(input)?;
<Token![,]>::parse(input)?;
let nsessions = (syn::LitInt::parse(input)?).base10_parse::<u64>().unwrap();
<Token![,]>::parse(input)?;
let msg = syn::Expr::parse(input)?;
Ok(SendCancelMacroInput {
func_name,
name,
meshedchannels_name,
nsessions,
msg,
})
}
}
impl From<SendCancelMacroInput> for proc_macro2::TokenStream {
fn from(input: SendCancelMacroInput) -> proc_macro2::TokenStream {
input.expand()
}
}
impl SendCancelMacroInput {
fn expand(&self) -> proc_macro2::TokenStream {
// Get the basic elements
let func_name = self.func_name.clone();
let name = self.name.clone();
let meshedchannels_name = self.meshedchannels_name.clone();
let msg = self.msg.clone();
// Build the vec with the session types S1..S(nsessions-2); the first binary session is fixed to End
let session_types: Vec<syn::Ident> = (1..(self.nsessions - 1))
.map(|i| format_ident!("S{}", i))
.collect();
quote! {
fn #func_name<#( #session_types , )* R>(
s: #meshedchannels_name< | #( #session_types , )*
R,
#name<mpstthree::role::end::RoleEnd>,
>,
) -> std::result::Result<(), Box<dyn std::error::Error>>
where
#( #session_types : mpstthree::binary::struct_trait::session::Session , )*
R: mpstthree::role::Role,
{
s.session1.sender.send(mpstthree::binary::struct_trait::end::Signal::Cancel).unwrap();
mpstthree::binary::cancel::cancel(s);
panic!("{:?}", #msg);
}
}
}
} | mpstthree::binary::struct_trait::end::End, |
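// Hedged sketch of how the generated input might be driven (the macro name and
// the role/struct identifiers below are assumptions, not taken from this file):
//
//     send_cancel!(cancel_a, RoleA, MeshedChannelsThree, 3, "session aborted");
//
// With nsessions = 3 this expands to `fn cancel_a<S1, R>(...)` whose first
// binary session is pinned to `End`: it sends a Cancel signal on session1,
// cancels the whole meshed channel, then panics with the given message.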
managers.py | from django.db import models
| workflow.launch_for(document) | class WorkflowManager(models.Manager):
def launch_for(self, document):
for workflow in document.document_type.workflows.all(): |
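# Hedged usage sketch (assumes this manager is installed as `Workflow.objects`
# and that `document.document_type` has workflows attached):
#     Workflow.objects.launch_for(document)
# which starts every workflow registered for the document's type.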
livescript.py | from __future__ import unicode_literals
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LiveScriptCompiler(SubProcessCompiler):
output_extension = 'js'
def match_file(self, path):
return path.endswith('.ls')
def compile_file(self, infile, outfile, outdated=False, force=False):
if not outdated and not force:
return # File doesn't need to be recompiled
command = ( | "-cp",
settings.LIVE_SCRIPT_ARGUMENTS,
infile,
)
return self.execute_command(command, stdout_captured=outfile) | settings.LIVE_SCRIPT_BINARY, |
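# Hedged sketch of the resulting invocation (the binary and extra arguments
# come from settings, so the exact command line is an assumption):
#     lsc -cp <LIVE_SCRIPT_ARGUMENTS> path/to/file.ls
# with stdout captured into the compiled .js outfile by execute_command.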
device.go | //
// Copyright (c) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/kata-containers/agent/pkg/uevent"
pb "github.com/kata-containers/agent/protocols/grpc"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"google.golang.org/grpc/codes"
grpcStatus "google.golang.org/grpc/status"
)
const (
driver9pType = "9p"
driverBlkType = "blk"
driverSCSIType = "scsi"
driverEphemeralType = "ephemeral"
)
const (
rootBusPath = "/devices/pci0000:00"
pciBusMode = 0220
)
var (
sysBusPrefix = sysfsDir + "/bus/pci/devices"
pciBusRescanFile = sysfsDir + "/bus/pci/rescan"
pciBusPathFormat = "%s/%s/pci_bus/"
systemDevPath = "/dev"
)
// SCSI variables
var (
// Here in "0:0", the first number is the SCSI host number because
// only one SCSI controller has been plugged in, while the second number
// is always 0.
scsiHostChannel = "0:0:"
sysClassPrefix = sysfsDir + "/class"
scsiDiskPrefix = filepath.Join(sysClassPrefix, "scsi_disk", scsiHostChannel)
scsiBlockSuffix = "block"
scsiDiskSuffix = filepath.Join("/device", scsiBlockSuffix)
scsiHostPath = filepath.Join(sysClassPrefix, "scsi_host")
)
type deviceHandler func(device pb.Device, spec *pb.Spec, s *sandbox) error
var deviceHandlerList = map[string]deviceHandler{
driverBlkType: virtioBlkDeviceHandler,
driverSCSIType: virtioSCSIDeviceHandler,
}
func | () error {
return ioutil.WriteFile(pciBusRescanFile, []byte{'1'}, pciBusMode)
}
// getDevicePCIAddress fetches the complete PCI address in sysfs, based on the PCI
// identifier provided. This should be in the format: "bridgeAddr/deviceAddr".
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge.
func getDevicePCIAddress(pciID string) (string, error) {
tokens := strings.Split(pciID, "/")
if len(tokens) != 2 {
return "", fmt.Errorf("PCI Identifier for device should be of format [bridgeAddr/deviceAddr], got %s", pciID)
}
bridgeID := tokens[0]
deviceID := tokens[1]
// Deduce the complete bridge address based on the bridge address identifier passed
// and the fact that bridges are attached on the main bus with function 0.
pciBridgeAddr := fmt.Sprintf("0000:00:%s.0", bridgeID)
// Find out the bus exposed by bridge
bridgeBusPath := fmt.Sprintf(pciBusPathFormat, sysBusPrefix, pciBridgeAddr)
files, err := ioutil.ReadDir(bridgeBusPath)
if err != nil {
return "", fmt.Errorf("Error with getting bridge pci bus : %s", err)
}
busNum := len(files)
if busNum != 1 {
return "", fmt.Errorf("Expected an entry for bus in %s, got %d entries instead", bridgeBusPath, busNum)
}
bus := files[0].Name()
// Device address is based on the bus of the bridge to which it is attached.
// We do not pass devices as multifunction, hence the trailing 0 in the address.
pciDeviceAddr := fmt.Sprintf("%s:%s.0", bus, deviceID)
bridgeDevicePCIAddr := fmt.Sprintf("%s/%s", pciBridgeAddr, pciDeviceAddr)
agentLog.WithField("completePCIAddr", bridgeDevicePCIAddr).Info("Fetched PCI address for device")
return bridgeDevicePCIAddr, nil
}
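// Hedged example of the derivation above: for pciID "02/03" the bridge is
// assumed at "0000:00:02.0"; if that bridge exposes bus "0000:01", the
// function returns "0000:00:02.0/0000:01:03.0".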
func getPCIDeviceName(s *sandbox, pciID string) (string, error) {
pciAddr, err := getDevicePCIAddress(pciID)
if err != nil {
return "", err
}
var devName string
var notifyChan chan string
fieldLogger := agentLog.WithField("pciID", pciID)
// Check if the PCI identifier is in PCI device map.
s.Lock()
for key, value := range s.pciDeviceMap {
if strings.Contains(key, pciAddr) {
devName = value
fieldLogger.Info("Device found in pci device map")
break
}
}
// Rescan pci bus if we need to wait for a new pci device
if err = rescanPciBus(); err != nil {
fieldLogger.WithError(err).Error("Failed to scan pci bus")
s.Unlock()
return "", err
}
// If device is not found in the device map, hotplug event has not
// been received yet, create and add channel to the watchers map.
// The key of the watchers map is the device we are interested in.
// Note this is done inside the lock, not to miss any events from the
// global udev listener.
if devName == "" {
notifyChan = make(chan string, 1)
s.deviceWatchers[pciAddr] = notifyChan
}
s.Unlock()
if devName == "" {
fieldLogger.Info("Waiting on channel for device notification")
select {
case devName = <-notifyChan:
case <-time.After(time.Duration(timeoutHotplug) * time.Second):
s.Lock()
delete(s.deviceWatchers, pciAddr)
s.Unlock()
return "", grpcStatus.Errorf(codes.DeadlineExceeded,
"Timeout reached after %ds waiting for device %s",
timeoutHotplug, pciAddr)
}
}
return filepath.Join(systemDevPath, devName), nil
}
// device.Id should be the PCI address in the format "bridgeAddr/deviceAddr".
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge.
func virtioBlkDeviceHandler(device pb.Device, spec *pb.Spec, s *sandbox) error {
// Get the device node path based on the PCI device address
devPath, err := getPCIDeviceName(s, device.Id)
if err != nil {
return err
}
device.VmPath = devPath
return updateSpecDeviceList(device, spec)
}
// device.Id should be the SCSI address of the disk in the format "scsiID:lunID"
func virtioSCSIDeviceHandler(device pb.Device, spec *pb.Spec, s *sandbox) error {
// Retrieve the device path from SCSI address.
devPath, err := getSCSIDevPath(device.Id)
if err != nil {
return err
}
device.VmPath = devPath
return updateSpecDeviceList(device, spec)
}
// updateSpecDeviceList takes a device description provided by the caller,
// trying to find it on the guest. Once this device has been identified, the
// "real" information that can be read from inside the VM is used to update
// the same device in the list of devices provided through the OCI spec.
// This is needed to update information about minor/major numbers that cannot
// be predicted from the caller.
func updateSpecDeviceList(device pb.Device, spec *pb.Spec) error {
// If no ContainerPath is provided, we won't be able to match and
// update the device in the OCI spec device list. This is an error.
if device.ContainerPath == "" {
return grpcStatus.Errorf(codes.Internal,
"ContainerPath cannot be empty")
}
if spec.Linux == nil || len(spec.Linux.Devices) == 0 {
return grpcStatus.Errorf(codes.Internal,
"No devices found from the spec, cannot update")
}
stat := syscall.Stat_t{}
if err := syscall.Stat(device.VmPath, &stat); err != nil {
return err
}
dev := stat.Rdev
major := int64(unix.Major(dev))
minor := int64(unix.Minor(dev))
agentLog.WithFields(logrus.Fields{
"device-path": device.VmPath,
"device-major": major,
"device-minor": minor,
}).Info("handling block device")
// Update the spec
for idx, d := range spec.Linux.Devices {
if d.Path == device.ContainerPath {
hostMajor := spec.Linux.Devices[idx].Major
hostMinor := spec.Linux.Devices[idx].Minor
agentLog.WithFields(logrus.Fields{
"device-path": device.VmPath,
"host-device-major": hostMajor,
"host-device-minor": hostMinor,
"guest-device-major": major,
"guest-device-minor": minor,
}).Info("updating block device major/minor into the spec")
spec.Linux.Devices[idx].Major = major
spec.Linux.Devices[idx].Minor = minor
// there is no resource to update
if spec.Linux == nil || spec.Linux.Resources == nil {
return nil
}
// Resources must be updated since they are used to identify the
// device in the devices cgroup.
for idxRsrc, dRsrc := range spec.Linux.Resources.Devices {
if dRsrc.Major == hostMajor && dRsrc.Minor == hostMinor {
spec.Linux.Resources.Devices[idxRsrc].Major = major
spec.Linux.Resources.Devices[idxRsrc].Minor = minor
}
}
return nil
}
}
return grpcStatus.Errorf(codes.Internal,
"Should have found a matching device %s in the spec",
device.VmPath)
}
type checkUeventCb func(uEv *uevent.Uevent) bool
func waitForDevice(devicePath, deviceName string, checkUevent checkUeventCb) error {
uEvHandler, err := uevent.NewHandler()
if err != nil {
return err
}
defer uEvHandler.Close()
fieldLogger := agentLog.WithField("device", deviceName)
// Check if the device already exists.
if _, err := os.Stat(devicePath); err == nil {
fieldLogger.Info("Device already hotplugged, quit listening")
return nil
}
fieldLogger.Info("Started listening for uevents for device hotplug")
// Channel to signal when desired uevent has been received.
done := make(chan bool)
go func() {
// This loop will be either ended if the hotplugged device is
// found by listening to the netlink socket, or it will end
// after the function returns and the uevent handler is closed.
for {
uEv, err := uEvHandler.Read()
if err != nil {
fieldLogger.Error(err)
continue
}
fieldLogger = fieldLogger.WithFields(logrus.Fields{
"uevent-action": uEv.Action,
"uevent-devpath": uEv.DevPath,
"uevent-subsystem": uEv.SubSystem,
"uevent-seqnum": uEv.SeqNum,
})
fieldLogger.Info("Got uevent")
if checkUevent(uEv) {
fieldLogger.Info("Hotplug event received")
break
}
}
close(done)
}()
select {
case <-done:
case <-time.After(time.Duration(timeoutHotplug) * time.Second):
return grpcStatus.Errorf(codes.DeadlineExceeded,
"Timeout reached after %ds waiting for device %s",
timeoutHotplug, deviceName)
}
return nil
}
// scanSCSIBus scans SCSI bus for the given SCSI address(SCSI-Id and LUN)
func scanSCSIBus(scsiAddr string) error {
files, err := ioutil.ReadDir(scsiHostPath)
if err != nil {
return err
}
tokens := strings.Split(scsiAddr, ":")
if len(tokens) != 2 {
return grpcStatus.Errorf(codes.Internal,
"Unexpected format for SCSI Address : %s, expect SCSIID:LUN",
scsiAddr)
}
// Scan scsi host passing in the channel, SCSI id and LUN. Channel
// is always 0 because we have only one SCSI controller.
scanData := []byte(fmt.Sprintf("0 %s %s", tokens[0], tokens[1]))
for _, file := range files {
host := file.Name()
scanPath := filepath.Join(scsiHostPath, host, "scan")
if err := ioutil.WriteFile(scanPath, scanData, 0200); err != nil {
return err
}
}
return nil
}
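// Hedged example: for scsiAddr "2:0" the loop above writes "0 2 0" into
// /sys/class/scsi_host/host<N>/scan for every host, asking the kernel to
// probe channel 0, target 2, LUN 0.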
// findSCSIDisk finds the SCSI disk name associated with the given SCSI path.
// This approach eliminates the need to predict the disk name on the host side,
// but we do need to rescan the SCSI bus for this.
func findSCSIDisk(scsiPath string) (string, error) {
files, err := ioutil.ReadDir(scsiPath)
if err != nil {
return "", err
}
if len(files) != 1 {
return "", grpcStatus.Errorf(codes.Internal,
"Expecting a single SCSI device, found %v",
files)
}
return files[0].Name(), nil
}
// getSCSIDevPath scans SCSI bus looking for the provided SCSI address, then
// it waits for the SCSI disk to become available and returns the device path
// associated with the disk.
func getSCSIDevPath(scsiAddr string) (string, error) {
if err := scanSCSIBus(scsiAddr); err != nil {
return "", err
}
devPath := filepath.Join(scsiDiskPrefix+scsiAddr, scsiDiskSuffix)
checkUevent := func(uEv *uevent.Uevent) bool {
devSubPath := filepath.Join(scsiHostChannel+scsiAddr, scsiBlockSuffix)
return (uEv.Action == "add" &&
strings.Contains(uEv.DevPath, devSubPath))
}
if err := waitForDevice(devPath, scsiAddr, checkUevent); err != nil {
return "", err
}
scsiDiskName, err := findSCSIDisk(devPath)
if err != nil {
return "", err
}
return filepath.Join(devPrefix, scsiDiskName), nil
}
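// Hedged example of the path assembly above: for scsiAddr "2:0" the sysfs
// directory inspected is /sys/class/scsi_disk/0:0:2:0/device/block; if it
// holds a single entry "sdb", the returned path is /dev/sdb (assuming
// devPrefix is "/dev").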
func addDevices(devices []*pb.Device, spec *pb.Spec, s *sandbox) error {
for _, device := range devices {
if device == nil {
continue
}
err := addDevice(device, spec, s)
if err != nil {
return err
}
}
return nil
}
func addDevice(device *pb.Device, spec *pb.Spec, s *sandbox) error {
if device == nil {
return grpcStatus.Error(codes.InvalidArgument, "invalid device")
}
if spec == nil {
return grpcStatus.Error(codes.InvalidArgument, "invalid spec")
}
// log before validation to help with debugging gRPC protocol
// version differences.
agentLog.WithFields(logrus.Fields{
"device-id": device.Id,
"device-type": device.Type,
"device-vm-path": device.VmPath,
"device-container-path": device.ContainerPath,
"device-options": device.Options,
}).Debug()
if device.Type == "" {
return grpcStatus.Errorf(codes.InvalidArgument,
"invalid type for device %v", device)
}
if device.Id == "" && device.VmPath == "" {
return grpcStatus.Errorf(codes.InvalidArgument,
"invalid ID and VM path for device %v", device)
}
if device.ContainerPath == "" {
return grpcStatus.Errorf(codes.InvalidArgument,
"invalid container path for device %v", device)
}
devHandler, ok := deviceHandlerList[device.Type]
if !ok {
return grpcStatus.Errorf(codes.InvalidArgument,
"Unknown device type %q", device.Type)
}
return devHandler(*device, spec, s)
}
| rescanPciBus |
test10.py | #from time import *
from grovepi import *
from paho.mqtt.client import *
buzzer = 3
pinMode(buzzer, "OUTPUT")
MQTT_BROKER = "192.168.56.1" #The ip address will be vary based on where and how you connect to the Internet
#MQTT_BROKER = "broker.emqx.io" #using public mqtt broker to act as subsriber
MQTT_TOPIC = "test"
def | (client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe(MQTT_TOPIC)
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
try:
i = int(msg.payload)
print(i)
if i > 0 and i < 256:
analogWrite(buzzer, i)
except:
analogWrite(buzzer, 0)
client = Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_BROKER, 1883, 60)
client.loop_forever() | on_connect |
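# Hedged test sketch (assumes the mosquitto CLI tools and the broker address
# above): publishing an integer between 1 and 255 drives the buzzer PWM,
# anything else silences it.
#     mosquitto_pub -h 192.168.56.1 -t test -m 128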
gatsby-ssr.js | /**
* Implement Gatsby's SSR (Server Side Rendering) APIs in this file.
*
* See: https://www.gatsbyjs.org/docs/ssr-apis/
*/
const React = require('react');
exports.onRenderBody = (
{ setHeadComponents, setPostBodyComponents },
) => {
setHeadComponents([
<link
key="plugin-docsearch-css"
rel="stylesheet"
href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" | ]);
setPostBodyComponents([
<script
key="plugin-docsearch-js"
type="text/javascript"
src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"
/>,
]);
}; | /> |
day_4.py | import os
import re
class | ():
mandatory_fields = [
'byr',
'iyr',
'eyr',
'hgt',
'hcl',
'ecl',
'pid'
]
optional_fields = [
'cid'
]
def check_height(height):
if not any(unit in height for unit in ["cm", "in"]):
return False
return (
150 <= int(height[:-2]) <= 193
if "cm" in height
else 59 <= int(height[:-2]) <= 76
)
fields_rules = {
"byr": lambda k: 1920 <= int(k) <= 2002,
"iyr": lambda k: 2010 <= int(k) <= 2020,
"eyr": lambda k: 2020 <= int(k) <= 2030,
"hgt": check_height,
"hcl": lambda k: re.match('^#[a-f\d]{6}$', k) is not None,
"ecl": lambda k: k in ["amb", "blu", "brn", "gry",
"grn", "hzl", "oth"],
"pid": lambda k: len(k) == 9,
}
valid_passports = 0
valid_passports_two = 0
def __init__(self):
self.lines = []
self.read_list()
def read_list(self):
with open('./_data/data_4.txt') as f:
contents = f.read()
self.lines = contents.split(os.linesep + os.linesep)
def is_valid_passport(self, passport):
return all(field in passport for field in self.fields_rules)
def is_valid_password_fields(self, passport):
if self.is_valid_passport(passport):
passport = dict(part.split(':') for part in passport.split(' '))
return all(self.fields_rules[field](passport[field])
for field in self.fields_rules)
def part_one(self):
for passport in self.lines:
passport = passport.replace('\n', ' ')
if self.is_valid_passport(passport):
self.valid_passports += 1
return self.valid_passports
def part_two(self):
for passport in self.lines:
passport = passport.replace('\n', ' ')
if self.is_valid_password_fields(passport):
self.valid_passports_two += 1
return self.valid_passports_two
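# Hedged example record (the well-known sample format for this puzzle):
#     "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm"
# passes part one (all mandatory fields present) and part two (every field
# rule is satisfied).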
day_four = DayFour()
print("Number of valid password: ")
print(day_four.part_one())
print("======================================")
print("Number of valid password part two: ")
print(day_four.part_two())
| DayFour |
parserclasses.py | """A set of classes used during the parsing of VB code"""
StopSearch = -9999 # Used to terminate searches for parent properties
class VBElement(object):
"""An element of VB code"""
def __init__(self, details, text):
"""Initialize from the details"""
# import pdb; pdb.set_trace()
self.name = details[0]
self.text = makeUnicodeFromSafe(text[details[1]:details[2]])
self.elements = convertToElements(details[3], text)
def printTree(self, offset=0):
"""Print out this tree"""
print "%s%s : '%s'" % (
" " * offset, self.name, self.text.split("\n")[:20])
for subelement in self.elements:
subelement.printTree(offset + 1)
class VBFailedElement(object):
"""An failed element of VB code"""
def __init__(self, name, text):
"""Initialize from the details"""
self.name = name
self.text = text
self.elements = []
class VBNamespace(object):
"""Handles a VB Namespace"""
auto_handlers = []
auto_class_handlers = None
#
# Skip handlers are automatically by-passed. This is useful for quickly ignoring a
# handler in a base class
skip_handlers = []
#
# Used to translate () into [] under certain circumstances (LHS of an
# assign)
brackets_are_indexes = 0
default_scope = "Private"
#
# Set this to 1 if the object is a function (ie requires () when accessing)
is_function = 0
#
# Set to 1 for types which would mark the end of the docstrings
would_end_docstring = 1
#
# Intrinsic VB functions - we need to know these to be able to convert
# bare references (eg Dir) to function references (Dir())
intrinsic_functions = [
"Dir", "FreeFile", "Rnd", "Timer",
]
def __init__(self, scope="Private"):
"""Initialize the namespace"""
self.locals = []
self.local_default_scope = self.default_scope
self.auto_class_handlers = {
"object_definition": (VBVariableDefinition, self.locals),
"const_definition": (VBConstant, self.locals),
"user_type_definition": (VBUserType, self.locals),
"event_definition": (VBUnrendered, self.locals),
}
#
# This dictionary stores names which are to be substituted if found
self.name_substitution = {}
char_spec = Config["General", "IndentCharacter"]
if char_spec == "Space":
self._indent_char = " "
elif char_spec == "Tab":
self._indent_char = "\t"
else:
raise InvalidOption(
"Indent character option not understood: '%s'" % char_spec)
self._indent_amount = int(Config["General", "IndentAmount"])
def amGlobal(self, scope):
"""Decide if a variable will be considered a global
The algorithm works by asking our parent for a 'public_is_global' flag.
If this is true and the scope is either 'public' or 'global' then we
are a global. It is up to each parent to decide if publics are global.
Things like code modules will have this set whereas things like
subroutines will not.
"""
#
# First throw out anything which is private
log.info("Checking if global: '%s' scope is '%s'" % (self, scope))
if scope in ("Public", "Global"):
if self.getParentProperty("public_is_global", 0):
log.info("We are global!")
return 1
return 0
def assignParent(self, parent):
"""Set our parent
This is kept as a separate method because it is a useful hook for
subclasses. Once this method is called, the object is fully
initialized.
"""
self.parent = parent
def asString(self):
"""Convert to a nice representation"""
return repr(self)
def checkIfFunction(self, name):
"""Check if the name is a function or not"""
for loc in self.locals:
if loc.identifier == name:
return loc.is_function
raise UnresolvableName("Name '%s' is not known in this context" % name)
def checkOptionChoice(self, section, name, choices):
"""Return the index of a config option in a list of choices
We return the actual choice name which may seem odd but is done to
make the code readable. The main purpose of this method is to allow
the choice to be selected with the error trapping hidden.
"""
value = Config[section, name]
try:
return choices[list(choices).index(value)]
except ValueError:
raise InvalidOption("Invalid option for %s.%s, must be one of %s" % (
section, name, choices))
def checkOptionYesNo(self, section, name):
"""Return the yes/no value of an option checking for invalid answers"""
return self.checkOptionChoice(section, name, ("Yes", "No"))
def containsStatements(self):
"""Check if we contain statements"""
#
# TODO: This needs refactoring - it is horrible
if isinstance(self, NonCodeBlocks):
return 0
if not hasattr(self, "blocks"):
return 1
elif self.blocks:
for item in self.blocks:
if item.containsStatements():
return 1
return 0
else:
return 1
def createExtractHandler(self, token):
"""Create a handler which will extract a certain token value"""
def handler(element):
log.info("Grabbed attribute '%s' for %s as '%s'" %
(token, self, element.text))
setattr(self, token, element.text)
return handler
def filterListByClass(self, sequence, cls):
"""Return all elements of sequence that are an instance of the given class"""
return [item for item in sequence if isinstance(item, cls)]
def finalizeObject(self):
"""Finalize the object
This method is called once the object has been completely parsed and can
be used to do any processing required.
"""
def findParentOfClass(self, cls):
"""Return our nearest parent who is a subclass of cls"""
try:
parent = self.parent
except AttributeError:
raise NestingError(
"Reached outer layer when looking for parent of class")
if isinstance(parent, cls):
return parent
else:
return parent.findParentOfClass(cls)
def getHandler(self, element):
"""Find a handler for the element"""
if element.name in self.skip_handlers:
return None
elif element.name in self.auto_handlers:
log.info("Found auto handler for '%s' ('%s')" %
(element.name, self))
return self.createExtractHandler(element.name)
elif element.name in self.auto_class_handlers:
log.info("Found auto handler for '%s' ('%s')" %
(element.name, self))
obj_class, add_to = self.auto_class_handlers[element.name]
if obj_class == self.__class__:
# Ooops, recursive handling - we should handle the sub elements
def class_handler(element):
for sub_element in element.elements:
self.handleSubObject(sub_element, obj_class, add_to)
else:
def class_handler(element):
self.handleSubObject(element, obj_class, add_to)
return class_handler
try:
return getattr(self, "handle_%s" % element.name)
except AttributeError:
return None
def getIndent(self, indent):
"""Return some spaces to do indenting"""
return self._indent_char * indent * self._indent_amount
def getLocalNameFor(self, name):
"""Get the local version of a name
We look for any ancestor with a name conversion in operation for this name and | """
try:
return self.name_substitution[name]
except KeyError:
try:
return self.parent.getLocalNameFor(name)
except AttributeError:
return name
def getParentProperty(self, name, default=None):
"""Get a property from our nearest ancestor who has it"""
try:
return getattr(self, name)
except AttributeError:
try:
parent = self.parent
return parent.getParentProperty(name)
except AttributeError:
if default is not None:
return default
raise NestingError(
"Reached outer level when trying to access a parent property: "
"'%s'" % name)
def getWarning(self, warning_type, text, indent=0, crlf=0):
"""Construct a warning comment"""
ret = "%s# %s (%s) %s" % (
self.getIndent(indent),
Config["General", "AttentionMarker"],
warning_type,
text)
if crlf:
ret += "\n"
return ret
def handleSubObject(self, element, obj_class, add_to):
"""Handle an object which creates a sub object"""
v = obj_class(self.local_default_scope)
v.processElement(element)
v.assignParent(self)
v.finalizeObject()
#
# Assume that we are supposed to add this to a list of items
# if this fails then perhaps this is an attribute we are supposed to
# set
try:
add_to.append(v)
except AttributeError:
setattr(self, add_to, v)
#
log.info("Added new %s to %s" % (obj_class, self.asString()))
def isAFunction(self, name):
"""Check if the name is a function or not
We traverse up through the nested namespaces until someone knows
the name and then see if they are a function.
"""
if name in self.intrinsic_functions:
return 1
try:
return self.checkIfFunction(name)
except UnresolvableName:
try:
return self.parent.isAFunction(name)
except (AttributeError, UnresolvableName):
return 0 # Nobody knew the name so we can't know if it is or not
def processElement(self, element):
"""Process our tree"""
handler = self.getHandler(element)
if handler:
handler(element)
else:
if element.elements:
for subelement in element.elements:
self.processElement(subelement)
else:
log.info("Unhandled element '%s' from %s\n%s" %
(element.name, self, element.text))
def registerAsGlobal(self):
"""Register ourselves as a global object
We try to add ourselves to our parent's "global_objects" table. This may fail
if we are not owned by anything that has a global_objects table, as would be
the case for converting a simple block of text.
"""
try:
global_objects = self.getParentProperty("global_objects")
except NestingError:
log.warn(
"Tried to register global object but there was no suitable object table")
else:
global_objects[self.identifier] = self
log.info("Registered a new global object: '%s'" % self)
def registerImportRequired(self, modulename):
"""Register a need to import a certain module
When we need to use a variable from another module we need to tell our
module-like container to add an 'import' statement. So we search for
such a container and try to add the module name to the import list.
It is possible (but unlikely) that we need the import but we are not in
a container. If this happens we just warn and carry on.
"""
try:
module_imports = self.getParentProperty("module_imports")
except NestingError:
log.warn(
"Tried to request a module import (%s)"
" but couldn't find a suitable container" %
modulename)
else:
if modulename not in module_imports:
module_imports.append(modulename)
log.info("Registered a new module import: '%s'" % modulename)
def renderAsCode(self, indent=0):
"""Render this element as code"""
return self.getIndent(indent) + "# Unrendered object %s\n" % (self.asString(), )
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name"""
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def resolveName(self, name, rendering_locals=None, requestedby=None):
"""Convert a local name to a fully resolved name
We traverse up through the nested namespaces until someone knows
what to do with the name. If nobody knows then we know it must be
a local so it keeps the same name.
"""
if rendering_locals is None:
rendering_locals = self.getParentProperty("rendering_locals")
if not requestedby:
requestedby = self
try:
return self.resolveLocalName(name, rendering_locals, requestedby=requestedby)
except UnresolvableName:
try:
return self.parent.resolveName(
name, rendering_locals, requestedby=requestedby)
except AttributeError:
return name # Nobody knew the name so it must be local
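# Hedged illustration of the resolution chain above: resolving "Count" inside
# a subroutine first asks that subroutine's namespace via resolveLocalName,
# then each enclosing namespace in turn; if nobody recognises the name it is
# rendered unchanged as a local.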
def searchParentProperty(self, name):
"""Search for any ancestor who has the named parameter set to true
Stop searching if someone has the property set to StopSearch
"""
try:
if getattr(self, name) == StopSearch:
return 0
elif getattr(self, name):
return 1
except AttributeError:
pass
try:
parent = self.parent
return parent.searchParentProperty(name)
except AttributeError:
return 0
def handle_scope(self, element):
"""Handle a scope definition"""
self.local_default_scope = element.text
log.info("Changed default scope to %s" % self.local_default_scope)
def handle_line_end(self, element):
"""Handle the end of a line"""
self.local_default_scope = self.default_scope
class VBConsumer(VBNamespace):
"""Consume and store elements"""
def processElement(self, element):
"""Eat this element"""
self.element = element
log.info("Consumed element: %s" % element)
class VBUnrendered(VBConsumer):
"""Represents an unrendered statement"""
would_end_docstring = 0
def renderAsCode(self, indent):
"""Render the unrendrable!"""
if self.checkOptionYesNo("General", "WarnAboutUnrenderedCode") == "Yes":
return self.getWarning(
"UntranslatedCode",
self.element.text.replace("\n", "\\n"), indent, crlf=1)
else:
return ""
class VBMessage(VBUnrendered):
"""Allows a message to be placed in the python output"""
def __init__(self, scope="Private", message="No message", messagetype="Unknown"):
"""Initialise the message"""
super(VBMessage, self).__init__(scope)
self.message = message
self.messagetype = messagetype
def renderAsCode(self, indent=0):
"""Render the message"""
return self.getWarning(self.messagetype,
self.message, indent, crlf=1)
class VBMissingArgument(VBConsumer):
"""Represents an missing argument"""
def renderAsCode(self, indent=0):
"""Render the unrendrable!"""
return "VBMissingArgument"
class VBCodeBlock(VBNamespace):
"""A block of VB code"""
def __init__(self, scope="Private"):
"""Initialize the block"""
super(VBCodeBlock, self).__init__()
self.blocks = []
self.auto_class_handlers.update({
"assignment_statement": (VBAssignment, self.blocks),
"lset_statement": (VBLSet, self.blocks),
"rset_statement": (VBRSet, self.blocks),
"set_statement": (VBSet, self.blocks),
"comment_body": (VBComment, self.blocks),
"vb2py_directive": (VB2PYDirective, self.blocks),
"if_statement": (VBIf, self.blocks),
"inline_if_statement": (VBInlineIf, self.blocks),
"select_statement": (VBSelect, self.blocks),
"exit_statement": (VBExitStatement, self.blocks),
"while_statement": (VBWhile, self.blocks),
"do_statement": (VBDo, self.blocks),
"redim_statement": (VBReDim, self.blocks),
"explicit_call_statement": (VBExplicitCall, self.blocks),
"implicit_call_statement": (VBCall, self.blocks),
"inline_implicit_call": (VBCall, self.blocks),
"label_statement": (VBLabel, self.blocks),
"with_statement": (VBWith, self.blocks),
"end_statement": (VBEnd, self.blocks),
"for_statement": (VBFor, self.blocks),
"inline_for_statement": (VBFor, self.blocks),
"for_each_statement": (VBForEach, self.blocks),
"open_statement": (VBOpen, self.blocks),
"close_statement": (VBClose, self.blocks),
"input_statement": (VBInput, self.blocks),
"print_statement": (VBPrint, self.blocks),
"line_input_statement": (VBLineInput, self.blocks),
"seek_statement": (VBSeek, self.blocks),
"name_statement": (VBName, self.blocks),
"attribute_statement": (VBUnrendered, self.blocks),
"resume_statement": (VBUnrendered, self.blocks),
"goto_statement": (VBUnrendered, self.blocks),
"on_statement": (VBUnrendered, self.blocks),
"external_declaration": (VBUnrendered, self.blocks),
"get_statement": (VBUnrendered, self.blocks),
"put_statement": (VBUnrendered, self.blocks),
"option_statement": (VBUnrendered, self.blocks),
"class_header_block": (VBUnrenderedBlock, self.blocks),
"parser_failure": (VBParserFailure, self.blocks),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Watch out for the block not containing any statements (could be all
# comments!)
if not self.containsStatements():
self.blocks.append(VBPass())
#
return "".join([block.renderAsCode(indent) for block in self.blocks])
class VBUnrenderedBlock(VBCodeBlock):
"""Represents an unrendered block"""
would_end_docstring = 0
def renderAsCode(self, indent):
"""Render the unrendrable!"""
return ""
class VBOptionalCodeBlock(VBCodeBlock):
"""A block of VB code which can be empty and still sytactically correct"""
def containsStatements(self, indent=0):
"""Return true if this block contains statements
We always return 1 here because it doesn't matter if we contain statements or not
"""
return 1
class VBVariable(VBNamespace):
"""Handles a VB Variable"""
auto_handlers = [
"scope",
"type",
"string_size_indicator",
"value",
"identifier",
"optional",
"new_keyword",
"preserve_keyword",
"implicit_object",
]
skip_handlers = [
"const_statement",
]
def __init__(self, scope="Private"):
"""Initialize the variable"""
super(VBVariable, self).__init__(scope)
self.identifier = None
self.scope = scope
self.type = "Variant"
self.size_definitions = []
self.value = None
self.optional = None
self.expression = VBMissingArgument()
self.new_keyword = None
self.preserve_keyword = None
self.string_size_indicator = None
self.object = None
self.implicit_object = None
self.unsized_definition = None
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"size": (VBSizeDefinition, self.size_definitions),
"size_range": (VBSizeDefinition, self.size_definitions),
"unsized_definition": (VBConsumer, "unsized_definition"),
}
def finalizeObject(self):
"""We can use this opportunity to now determine if we are a global"""
if self.amGlobal(self.scope):
self.registerAsGlobal()
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.optional:
return "%s=%s" % (self.identifier, self.expression.renderAsCode())
else:
return self.identifier
class VBSizeDefinition(VBNamespace):
"""Handles a VB Variable size definition"""
def __init__(self, scope="Private"):
"""Initialize the size definition"""
super(VBSizeDefinition, self).__init__(scope)
#
self.expression = None
self.sizes = []
self.size_ranges = []
#
self.auto_class_handlers = {
"size": (VBExpression, self.sizes),
"size_range": (VBSizeDefinition, self.size_ranges),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.sizes:
return ", ".join([item.renderAsCode() for item in self.sizes])
else:
return "(%s)" % ", ".join([item.renderAsCode() for item in self.size_ranges])
class VBObject(VBNamespace):
"""Handles a VB Object"""
am_on_lhs = 0 # Set to 1 if the object is on the LHS of an assignment
def __init__(self, scope="Private"):
"""Initialize the object"""
super(VBObject, self).__init__(scope)
self.primary = None
self.modifiers = []
self.implicit_object = None
self.auto_class_handlers.update({
"primary": (VBConsumer, "primary"),
"attribute": (VBAttribute, self.modifiers),
"parameter_list": (VBParameterList, self.modifiers),
})
self.auto_handlers = (
"implicit_object",
)
def renderAsCode(self, indent=0):
"""Render this subroutine"""
return self._renderPartialObject(indent)
def finalizeObject(self):
"""Finalize the object
Check for any type markers.
"""
for obj in [self.primary] + self.modifiers:
try:
ending = obj.element.text[-1:] or " "
except AttributeError:
pass # It isn't a consumer so we can't check it
else:
if ending in "#$%&":
log.info(
"Removed type identifier from '%s'" % obj.element.text)
obj.element.text = obj.element.text[:-1]
def asString(self):
"""Return a string representation"""
if self.implicit_object:
log.info("Ooops an implicit object in definition")
ret = [self.primary.element.text] + \
[item.asString() for item in self.modifiers]
return ".".join(ret)
def fnPart(self):
"""Return the function part of this object (ie without any parameters"""
return self._renderPartialObject(indent=0, modifier=VBAttribute)
def _renderPartialObject(self, indent=0, modifier=None):
"""Render this object but only including modifiers of a certain class"""
#
# Check for implicit object and if we are one then find the nearest
# "With"
if self.implicit_object:
implicit_name = "%s." % self.getParentProperty("with_object")
else:
implicit_name = ""
#
# For the LHS objects we need to look for the local name for Function
# return arguments
if self.am_on_lhs:
obj_name = self.getLocalNameFor(self.primary.element.text)
else:
obj_name = self.primary.element.text
#
resolved_name = self.resolveName(obj_name)
#
# Check if this looks like a function
# TODO: This isn't very rigorous
if not self.modifiers:
if self.isAFunction(obj_name):
resolved_name += "()"
#
if modifier is None:
valid_modifiers = self.modifiers
else:
valid_modifiers = self.filterListByClass(self.modifiers, modifier)
#
return "%s%s%s" % (implicit_name,
resolved_name,
"".join([item.renderAsCode() for item in valid_modifiers]))
class VBLHSObject(VBObject):
"""Handles a VB Object appearing on the LHS of an assignment"""
am_on_lhs = 1 # Set to 1 if the object is on the LHS of an assignment
class VBAttribute(VBConsumer):
"""An attribute of an object"""
def renderAsCode(self, indent=0):
"""Render this attribute"""
return ".%s" % self.element.text
class VBParameterList(VBCodeBlock):
"""An parameter list for an object"""
def __init__(self, scope="Private"):
"""Initialize the object"""
super(VBParameterList, self).__init__(scope)
self.expressions = []
self.auto_class_handlers.update({
"expression": (VBExpression, self.expressions),
"missing_positional": (VBMissingPositional, self.expressions),
})
def renderAsCode(self, indent=0):
"""Render this attribute"""
#
# Check if we should replace () with [] - needed on the LHS of an
# assignment but not elsewhere since __call__ is mapped to __getitem__
# for array types
if self.searchParentProperty("brackets_are_indexes"):
fmt = "[%s]"
# Prevents double accounting in a(b(5)) expressions where b is a function
self.brackets_are_indexes = StopSearch
else:
fmt = "(%s)"
#
# Construct the list of parameters - this is harder than it looks because
# for any missing positional parameters we have to do some introspection
# to dig out the default value
param_list = []
for idx, element in zip(xrange(1000), self.expressions):
# Needed so that the element can get its default
element.parameter_index_position = idx
param_list.append(element.renderAsCode())
content = ", ".join(param_list)
return fmt % content
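# Illustrative example of the bracket handling above (assumed typical
# rendering): on the LHS of an assignment VB's `a(1) = 2` addresses an
# array element, so the parameter list renders with index brackets as
# `a[1] = 2`; anywhere else `a(1)` keeps call brackets, relying on
# __call__ being mapped to __getitem__ for array types.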
class VBMissingPositional(VBCodeBlock):
"""A positional argument that is missing from the argument list"""
def __init__(self, scope="Private"):
"""Initialize the object"""
super(VBMissingPositional, self).__init__(scope)
def renderAsCode(self, indent=0):
"""Render this attribute"""
#
# The parameter_index_position attribute will be set
# by our parent. We also need to look for the function name
# which depends on our context
try:
function_name = self.findParentOfClass(VBObject).fnPart()
except NestingError:
try:
function_name = self.getParentProperty("object").fnPart()
except NestingError:
raise UnresolvableName(
"Could not locate function name when supplying missing argument")
#
return "VBGetMissingArgument(%s, %d)" % (
function_name,
self.parameter_index_position)
class VBExpression(VBNamespace):
"""Represents an comment"""
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBExpression, self).__init__(scope)
self.parts = []
self.auto_class_handlers.update({
"sign": (VBExpressionPart, self.parts),
"pre_not": (VBExpressionPart, self.parts),
"par_expression": (VBParExpression, self.parts),
"point": (VBPoint, self.parts),
"operation": (VBOperation, self.parts),
"pre_named_argument": (VBExpressionPart, self.parts),
"pre_typeof": (VBUnrendered, self.parts),
})
# operators who requested regrouping (eg 'a Like b' -> 'Like(a,b)')
self.operator_groupings = []
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForOperatorGroupings()
return " ".join([item.renderAsCode(indent) for item in self.parts])
def checkForOperatorGroupings(self):
"""Look for operators who requested regrouping
Some operator cannot be translated in place (eg Like) since they must
be converted to functions. This means that we have to re-order the
parts of the expression.
"""
for item in self.operator_groupings:
idx = self.parts.index(item)
rh, lh = self.parts.pop(idx + 1), self.parts.pop(idx - 1)
item.rh, item.lh = rh, lh
class VBParExpression(VBNamespace):
"""A block in an expression"""
auto_handlers = [
"l_bracket",
"r_bracket",
]
def __init__(self, scope="Private"):
"""Initialize"""
super(VBParExpression, self).__init__(scope)
self.parts = []
self.named_argument = ""
self.auto_class_handlers.update({
"integer": (VBExpressionPart, self.parts),
"hexinteger": (VBExpressionPart, self.parts),
"stringliteral": (VBStringLiteral, self.parts),
"dateliteral": (VBDateLiteral, self.parts),
"floatnumber": (VBExpressionPart, self.parts),
"longinteger": (VBExpressionPart, self.parts),
"object": (VBObject, self.parts),
"par_expression": (VBParExpression, self.parts),
"operation": (VBOperation, self.parts),
"named_argument": (VBConsumer, "named_argument"),
"pre_not": (VBExpressionPart, self.parts),
"pre_typeof": (VBUnrendered, self.parts),
"point": (VBPoint, self.parts),
"sign": (VBExpressionPart, self.parts),
})
self.l_bracket = self.r_bracket = ""
# operators who requested regrouping (eg 'a Like b' -> 'Like(a,b)')
self.operator_groupings = []
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForOperatorGroupings()
if self.named_argument:
arg = "%s=" % self.named_argument.element.text
else:
arg = ""
ascode = " ".join([item.renderAsCode(indent) for item in self.parts])
return "%s%s%s%s" % (arg, self.l_bracket, ascode, self.r_bracket)
def checkForOperatorGroupings(self):
"""Look for operators who requested regrouping
Some operator cannot be translated in place (eg Like) since they must
be converted to functions. This means that we have to re-order the
parts of the expression.
"""
# Destructively scan the list so we don't try this a second time later!
while self.operator_groupings:
item = self.operator_groupings.pop()
idx = self.parts.index(item)
rh, lh = self.parts.pop(idx + 1), self.parts.pop(idx - 1)
item.rh, item.lh = rh, lh
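# Worked example (illustrative): for `a Like b` the parts list starts as
# [a, Like, b]; the regrouping pops both operands into the operator so the
# later rendering emits `Like(a, b)` rather than the untranslatable infix
# form.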
class VBPoint(VBExpression):
"""A block in an expression"""
skip_handlers = [
"point",
]
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "(%s)" % ", ".join([item.renderAsCode() for item in self.parts])
class VBExpressionPart(VBConsumer):
"""Part of an expression"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.element.name == "object":
#
# Check for implicit object (inside a with)
if self.element.text.startswith("."):
return "%s%s" % (self.getParentProperty("with_object"),
self.element.text)
elif self.element.text.lower() == "like":
return "Like(%s, %s)" % (self.lh.renderAsCode(), self.rh.renderAsCode())
elif self.element.name == "pre_named_argument":
return "%s=" % (self.element.text.split(":=")[0],)
elif self.element.name == "pre_not":
self.element.text = "not"
elif self.element.name == "hexinteger":
if self.element.text.endswith("&"):
return "0x%s" % self.element.text[2:-1]
else:
return "0x%s" % self.element.text[2:]
return self.element.text
def finalizeObject(self):
"""Finalize the object
Check for any type markers.
"""
ending = self.element.text[-1:] or " "
if ending in "#$%&":
log.info("Removed type identifier from '%s'" % self.element.text)
self.element.text = self.element.text[:-1]
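# Illustrative examples of the hex handling above: VB's `&H1F` renders as
# `0x1F`, and the long-typed `&H1F&` also renders as `0x1F` (the trailing
# `&` type marker is dropped).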
class VBOperation(VBExpressionPart):
"""An operation in an expression"""
translation = {
"&": "+",
"^": "**",
"=": "==",
"\\": "//", # TODO: Is this right?
"is": "is",
"or": "or",
"and": "and", # TODO: are there any more?
"xor": "^",
"mod": "%",
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.element.text.lower() in self.translation:
return self.translation[self.element.text.lower()]
else:
return super(VBOperation, self).renderAsCode(indent)
def finalizeObject(self):
"""Finalize the object"""
if self.element.text.lower() in ("like", ):
log.info("Found regrouping operator, reversing order of operands")
self.parent.operator_groupings.append(self)
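# Illustrative example (tokens are mapped one-for-one by the table above;
# precedence differences are not adjusted here): the VB expression
#     a & b ^ 2 Mod c
# renders as
#     a + b ** 2 % c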
class VBStringLiteral(VBExpressionPart):
"""Represents a string literal"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Remember to replace the double quotes with single ones
body = self.element.text[1:-1]
body = body.replace('""', '"')
#
if self.checkOptionYesNo("General", "AlwaysUseRawStringLiterals") == "Yes":
body = body.replace("'", "\'")
return "r'%s'" % body
else:
body = body.replace('\\', '\\\\')
body = body.replace("'", "\\'")
return "'%s'" % body
class VBDateLiteral(VBParExpression):
"""Represents a date literal"""
skip_handlers = [
"dateliteral",
]
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "MakeDate(%s)" % ", ".join([item.renderAsCode() for item in self.parts])
class VBProject(VBNamespace):
"""Handles a VB Project"""
def __init__(self, scope="Private"):
"""Initialize the module"""
super(VBProject, self).__init__(scope)
self.global_objects = {} # This is where global variables live
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local modules to see if they have a matching global variable
and if they do then we can construct the local name from it.
"""
# import pdb; pdb.set_trace()
if name in self.global_objects:
# Found as another module's public var - so mark it up and request
# an import
modulename = self.global_objects[
name].getParentProperty("modulename")
if requestedby:
requestedby.registerImportRequired(modulename)
return "%s.%s" % (modulename,
name)
else:
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
class VBModule(VBCodeBlock):
"""Handles a VB Module"""
skip_handlers = [
]
# If this is 1 then local functions will become methods
convert_functions_to_methods = 0
indent_all_blocks = 0
    # Can be used to disallow new style classes
allow_new_style_class = 1
# Public objects defined here will not be globals
public_is_global = 0
# Put methods and attribute names in here which always need to be public
# like Class_Initialize and Class_Terminate for classes
always_public_attributes = []
def __init__(
self, scope="Private", modulename="unknownmodule", classname="MyClass",
superclasses=None):
"""Initialize the module"""
super(VBModule, self).__init__(scope)
self.auto_class_handlers.update({
"sub_definition": (VBSubroutine, self.locals),
"fn_definition": (VBFunction, self.locals),
"property_definition": (VBProperty, self.locals),
"enumeration_definition": (VBEnum, self.locals),
})
self.local_names = []
self.modulename = modulename
self.classname = classname
self.superclasses = superclasses or []
#
self.rendering_locals = 0
self.docstrings = []
self.module_imports = [] # The additional modules we need to import
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.setCustomModulesAsGlobals()
if self.checkOptionYesNo("General", "TryToExtractDocStrings") == "Yes":
self.extractDocStrings()
#
        # Pre-render the following before the import statements in case any
# of them ask us to do additional imports
header = self.renderModuleHeader(indent)
docstrings = self.renderDocStrings(indent)
declarations = self.renderDeclarations(indent + self.indent_all_blocks)
blocks = self.renderBlocks(indent + self.indent_all_blocks)
#
return "%s\n\n%s%s\n%s\n%s" % (
self.importStatements(indent),
header,
docstrings,
declarations,
blocks)
def importStatements(self, indent=0):
"""Render the standard import statements for this block"""
# Leading [""] gives a newline
other = [""] + ["import %s" % item for item in self.module_imports]
if self.checkOptionYesNo("General", "IncludeDebugCode") == "Yes":
debug = "\nfrom vb2py.vbdebug import *"
else:
debug = ""
return "from vb2py.vbfunctions import *%s%s" % (debug, "\n".join(other))
def renderDeclarations(self, indent):
"""Render the declarations as code
Most of the rendering is delegated to the individual declaration
classes. However, we cannot do this with properties since they need to
be grouped into a single assignment. We do the grouping here and
delegate the rendering to them.
"""
#
ret = []
self.rendering_locals = 1 # Used for switching behaviour (eg adding 'self')
#
# Handle non-properties and group properties together
properties = {}
for declaration in self.locals:
# Check for property
if isinstance(declaration, VBProperty):
log.info("Collected property '%s', decorator '%s'" % (
declaration.identifier, declaration.property_decorator_type))
decorators = properties.setdefault(declaration.identifier, {})
decorators[declaration.property_decorator_type] = declaration
else:
ret.append(declaration.renderAsCode(indent))
#
# Now render all the properties
for property in properties:
if properties[property]:
ret.append(properties[property].values()[0].renderPropertyGroup(
indent, property, **properties[property]))
#
self.rendering_locals = 0
#
return "".join(ret)
def renderBlocks(self, indent=0):
"""Render this module's blocks"""
return "".join([block.renderAsCode(indent) for block in self.blocks])
def extractDocStrings(self, indent=0):
"""Extract doc strings from this module
We look for comments in the body of the module and take all the ones before
anything that isn't a comment.
"""
for line in self.blocks[:]:
if isinstance(line, VBComment):
self.docstrings.append(line)
self.blocks.remove(line)
elif line.would_end_docstring:
break
def renderDocStrings(self, indent=0):
"""Render this module's docstrings"""
local_indent = indent + self.indent_all_blocks
if not self.docstrings:
return ""
elif len(self.docstrings) == 1:
return '%s"""%s"""\n' % (
self.getIndent(local_indent),
self.docstrings[0].asString())
else:
joiner = "\n%s" % self.getIndent(local_indent)
body_lines = [item.asString() for item in self.docstrings[1:]]
return '%s"""%s\n%s%s\n%s"""\n' % (
self.getIndent(local_indent),
self.docstrings[0].asString(),
self.getIndent(local_indent),
joiner.join(body_lines),
self.getIndent(local_indent))
def renderModuleHeader(self, indent=0):
"""Render a header for the module"""
return ""
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local variables to see if we know the name. If we do then we
just report it.
"""
if name in self.local_names:
return name
for obj in self.locals:
if obj.identifier == name:
return self.enforcePrivateName(obj)
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def enforcePrivateName(self, obj):
"""Enforce the privacy for this object name if required"""
if obj.scope == "Private" and self.checkOptionYesNo("General", "RespectPrivateStatus") == "Yes" \
and obj.identifier not in self.always_public_attributes:
return "%s%s" % (Config["General", "PrivateDataPrefix"], obj.identifier)
else:
return obj.identifier
def setCustomModulesAsGlobals(self):
"""Set all the custom import modules as global modules
If the user has specified custom imports (eg Comctllib) then
we need to import these as globals in the project. We force
them into the project (if there is one) global object
table so that they can be resolved at run time.
"""
#
# Get global object table if there is one
try:
global_objects = self.getParentProperty("global_objects")
except NestingError:
return
#
log.info("Processing custom modules now")
custom_modules = Config.getItemNames("CustomIncludes")
#
# Do for all custom modules
for module_id in custom_modules:
#
# Import this module
module_name = Config["CustomIncludes", module_id]
log.info("Processing custom module %s (%s)" %
(module_id, module_name))
module = __import__("vb2py.custom.%s" %
module_name, globals(), locals(), ["custom"])
#
# Get a container to store the values in
vbmodule = VBCodeModule(modulename="vb2py.custom.%s" % module_name)
#
# Now set all items in the module to be global (if they don't seem to be
# hidden)
for item_name in dir(module):
if not item_name.startswith("_"):
log.info("Registered new custom global '%s'" % item_name)
global_objects[item_name] = vbmodule
class VBClassModule(VBModule):
"""Handles a VB Class"""
# If this is 1 then local functions will become methods
convert_functions_to_methods = 1
indent_all_blocks = 1
# Put methods and attribute names in here which always need to be public
# like Class_Initialize and Class_Terminate for classes
always_public_attributes = ["Class_Initialize", "Class_Terminate"]
def __init__(self, *args, **kw):
"""Initialize the class module"""
super(VBClassModule, self).__init__(*args, **kw)
self.name_substitution = {"Me": "self"}
def renderModuleHeader(self, indent=0):
"""Render this element as code"""
supers = self.superclasses[:]
if self.checkOptionYesNo("Classes", "UseNewStyleClasses") == "Yes" and \
self.allow_new_style_class:
supers.insert(0, "Object")
if supers:
return "class %s(%s):\n" % (self.classname, ", ".join(supers))
else:
return "class %s:\n" % self.classname
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local variables to see if we know the name. If we do then we
need to add a self.
"""
# Don't do anything for locals
if rendering_locals:
prefix = ""
else:
prefix = "self."
#
if name in self.local_names:
return "%s%s" % (prefix, name)
for obj in self.locals:
if obj.identifier == name:
return "%s%s" % (prefix, self.enforcePrivateName(obj))
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def assignParent(self, parent):
"""Set our parent"""
super(VBClassModule, self).assignParent(parent)
self.identifier = self.classname
self.registerAsGlobal()
class VBCodeModule(VBModule):
"""Handles a VB Code module"""
public_is_global = 1 # Public objects defined here will be globals
def enforcePrivateName(self, obj):
"""Enforce the privacy for this object name if required
In a code module this is not required. Private variables and
definitions in a code module are not really hidden in the same way as
in a class module. They are accessible still. The main thing is that
they are not global.
"""
return obj.identifier
class VBFormModule(VBClassModule):
"""Handles a VB Form module"""
# If this is 1 then local functions will become methods
convert_functions_to_methods = 1
class VBCOMExternalModule(VBModule):
"""Handles external COM references"""
def __init__(self, *args, **kw):
"""Initialize the COM module
We always need win32com.client to be imported
"""
super(VBCOMExternalModule, self).__init__(*args, **kw)
self.module_imports.append("win32com.client")
docstring = VBRenderDirect(
"Automatically generated file based on project references")
self.docstrings.append(docstring)
def renderDeclarations(self, indent):
"""Render all the declarations
We have a list of libraries and objects in our names attribute
so we create a series of dummy classes with callable
attributes which return COM objects.
"""
library_code = []
for library, members in self.names.iteritems():
member_code = []
for member in members:
member_code.append(
' def %s(self):\n'
' """Create the %s.%s object"""\n'
' return win32com.client.Dispatch("%s.%s")\n'
'\n' % (member, library, member, library, member))
library_code.append('class _%s:\n'
' """COM Library"""\n\n'
'%s'
'%s = _%s()\n' % (
library,
''.join(member_code),
library,
library))
return '\n\n'.join(library_code)
class VBVariableDefinition(VBVariable):
"""Handles a VB Dim of a Variable"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
local_name = self.resolveName(self.identifier)
#
# TODO: Can't handle implicit objects yet
if self.implicit_object:
warning = self.getWarning(
"UnhandledDefinition",
"Dim of implicit 'With' object (%s) is not supported" % local_name,
indent=indent, crlf=1)
else:
warning = ""
#
if self.string_size_indicator:
size = self.string_size_indicator
self.type = "FixedString"
else:
size = ""
#
# Make sure we resolve the type properly
local_type = self.resolveName(self.type)
#
if self.unsized_definition: # This is a 'Dim a()' statement
return "%s%s%s = vbObjectInitialize(objtype=%s)\n" % (
warning,
self.getIndent(indent),
local_name,
local_type)
elif self.size_definitions: # There is a size 'Dim a(10)'
if self.preserve_keyword:
preserve = ", %s" % (local_name, )
else:
preserve = ""
if size:
size = ", stringsize=" + size
rendered_size_definitions = [
item.renderAsCode() for item in self.size_definitions]
return "%s%s%s = vbObjectInitialize((%s,), %s%s%s)\n" % (
warning,
self.getIndent(indent),
local_name,
", ".join(rendered_size_definitions),
local_type,
preserve,
size)
elif self.new_keyword: # It is an 'Dim a as new ...'
return "%s%s%s = %s(%s)\n" % (
warning,
self.getIndent(indent),
local_name,
local_type,
size)
else: # This is just 'Dim a as frob'
return "%s%s%s = %s(%s)\n" % (
warning,
self.getIndent(indent),
local_name,
local_type,
size)
def finalizeObject(self):
"""Finalize the object
Check for any type markers.
"""
ending = self.identifier[-1:] or " "
if ending in "#$%&":
log.info("Removed type identifier from '%s'" % self.identifier)
self.identifier = self.identifier[:-1]
class VBConstant(VBVariableDefinition):
"""Represents a constant in VB"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
# local_name = self.getLocalNameFor(self.identifier)
local_name = self.resolveName(self.identifier)
return "%s%s = %s\n" % (
self.getIndent(indent),
local_name,
self.expression.renderAsCode())
class VBReDim(VBCodeBlock):
"""Represents a Redim statement"""
def __init__(self, scope="Private"):
"""Initialize the Redim"""
super(VBReDim, self).__init__(scope)
#
self.variables = []
self.preserve = None
#
self.auto_class_handlers = {
"object_definition": (VBVariableDefinition, self.variables),
"preserve_keyword": (VBConsumer, "preserve"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
for var in self.variables:
var.preserve_keyword = self.preserve
return "".join([var.renderAsCode(indent) for var in self.variables])
class VBAssignment(VBNamespace):
"""An assignment statement"""
auto_handlers = [
]
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBAssignment, self).__init__(scope)
self.parts = []
self.object = None
self.auto_class_handlers.update({
"expression": (VBExpression, self.parts),
"object": (VBLHSObject, "object")
})
def asString(self):
"""Convert to a nice representation"""
return "%s = %s" % (self.object, self.parts)
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForModuleGlobals()
self.object.brackets_are_indexes = 1 # Convert brackets on LHS to []
return "%s%s = %s\n" % (self.getIndent(indent),
self.object.renderAsCode(),
self.parts[0].renderAsCode(indent))
def checkForModuleGlobals(self):
"""Check if this assignment requires a global statement
We can use this opportunity to now check if we need to append a
'global' statement to our container. If we are in a CodeModule an
assignment and the LHS of the assignment is a module level variable
which is not locally shadowed then we need a global.
So the procedure is,
- look for our parent who is a subroutine type
- if we don't have one then skip out
- see if this parent knows us, if so then we are a subroutine local
- also see if we are the subroutine name
- look for our parent who is a module type
- see if this parent knows us, if so then we are a module local
- if we are then tell our subroutine parent that we need a global statement
"""
log.info("Checking whether to use a global statement for '%s'" %
self.object.primary.element.text)
# import pdb; pdb.set_trace()
try:
enclosing_sub = self.findParentOfClass(VBSubroutine)
except NestingError:
return # We are not in a subroutine
log.info("Found sub")
try:
name = enclosing_sub.resolveLocalName(
self.object.primary.element.text)
except UnresolvableName:
if enclosing_sub.identifier == self.object.primary.element.text:
return
else:
return # We are a subroutine local
log.info("Am not local")
try:
enclosing_module = self.findParentOfClass(VBCodeModule)
except NestingError:
return # We are not in a module
log.info("Found code module")
try:
name = enclosing_module.resolveLocalName(
self.object.primary.element.text)
except UnresolvableName:
return # We are not known at the module level
# If we get to here then we are a module level local!
enclosing_sub.globals_required[
self.resolveName(self.object.primary.element.text)] = 1
log.info("Added a module level global: '%s'" %
self.resolveName(self.object.primary.element.text))
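# Illustrative example (assuming a module-level variable `count`): a Sub
# that assigns to `count` without declaring a local of the same name would
# have `global count` prepended to its rendered body, mirroring VB's
# module-variable semantics.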
class VBSpecialAssignment(VBAssignment):
"""A special assignment eg LSet, RSet where the assignment ends up as a
function call"""
fn_name = None
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.checkForModuleGlobals()
self.object.brackets_are_indexes = 1 # Convert brackets on LHS to []
return "%s%s = %s(%s, %s)\n" % (self.getIndent(indent),
self.object.renderAsCode(),
self.fn_name,
self.object.renderAsCode(),
self.parts[0].renderAsCode(indent))
class VBLSet(VBSpecialAssignment):
"""An LSet statement"""
fn_name = "LSet"
class VBRSet(VBSpecialAssignment):
"""An RSet statement"""
fn_name = "RSet"
class VBSet(VBAssignment):
"""A set statement"""
auto_handlers = [
"new_keyword",
]
new_keyword = ""
def renderAsCode(self, indent=0):
"""Render this element as code"""
if not self.new_keyword:
return super(VBSet, self).renderAsCode(indent)
else:
return "%s%s = %s()\n" % (
self.getIndent(indent),
self.object.renderAsCode(),
self.parts[0].renderAsCode(indent))
class VBEnd(VBAssignment):
"""An end statement"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%ssys.exit(0)\n" % self.getIndent(indent)
class VBCall(VBCodeBlock):
"""A call statement"""
auto_handlers = [
]
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBCall, self).__init__(scope)
self.parameters = []
self.object = None
self.auto_class_handlers = ({
"expression": (VBParExpression, self.parameters),
"missing_positional": (VBMissingPositional, self.parameters),
"object": (VBObject, "object")
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.parameters:
#
# Construct the list of parameters - this is harder than it looks because
# for any missing positional parameters we have to do some introspection
# to dig out the default value
param_list = []
for idx, element in zip(xrange(1000), self.parameters):
# Needed so that the element can get its default
element.parameter_index_position = idx
param_list.append(element.renderAsCode())
params = ", ".join(param_list)
else:
params = ""
#
self.object.am_on_lhs = 1
#
return "%s%s(%s)\n" % (self.getIndent(indent),
self.object.renderAsCode(),
params)
class VBExplicitCall(VBCodeBlock):
"""A call statement on a single line with parenthesis
This is illegal in VB but can be found in VBSCript
"""
auto_handlers = [
]
def __init__(self, scope="Private"):
"""Initialize the assignment"""
super(VBExplicitCall, self).__init__(scope)
self.parameters = []
self.object = None
self.auto_class_handlers = ({
"expression": (VBParExpression, self.parameters),
"missing_positional": (VBMissingPositional, self.parameters),
"qualified_object": (VBObject, "object")
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.parameters:
#
# Something has gone wrong here because there shouldn't be any parameters
# in the call. These should be encapsulated in the object.
raise VBParserError(
'Unexpected parameters (%s) in explicit call' % self.parameters)
#
self.object.am_on_lhs = 1
#
return "%s%s\n" % (self.getIndent(indent),
self.object.renderAsCode())
class VBExitStatement(VBConsumer):
"""Represents an exit statement"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
indenter = self.getIndent(indent)
rv_name = Config["Functions", "ReturnVariableName"]
if self.element.text == "Exit Function":
return "%sreturn %s\n" % (indenter, rv_name)
elif self.element.text == "Exit Sub":
return "%sreturn\n" % indenter
elif self.element.text == "Exit Property":
if self.getParentProperty("property_decorator_type") == "Get":
return "%sreturn %s\n" % (indenter, rv_name)
else:
return "%sreturn\n" % indenter
else:
return "%sbreak\n" % indenter
class VBComment(VBConsumer):
"""Represents an comment"""
#
# Used to indicate if this is a valid statement
not_a_statement = 0
def renderAsCode(self, indent=0):
"""Render this element as code"""
return self.getIndent(indent) + "#%s\n" % self.element.text
def asString(self):
"""Render this element as a string"""
return self.element.text
class VBLabel(VBUnrendered):
"""Represents a label"""
def renderAsCode(self, indent):
"""Render the label"""
if Config["Labels", "IgnoreLabels"] == "Yes":
return ""
else:
return super(VBLabel, self).renderAsCode(indent)
class VBOpen(VBCodeBlock):
"""Represents an open statement"""
def __init__(self, scope="Private"):
"""Initialize the open"""
super(VBOpen, self).__init__(scope)
#
self.filename = None
self.open_modes = []
self.channel = None
self.access_length = None
#
self.auto_class_handlers = ({
"filename": (VBParExpression, "filename"),
"open_mode": (VBConsumer, self.open_modes),
"channel": (VBParExpression, "channel"),
"access_length": (VBParExpression, "access_length"),
})
#
self.open_mode_lookup = {
"Input": "r",
"Output": "w",
"Append": "a",
"Binary": "b",
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
file_mode = ""
todo = []
for mode in self.open_modes:
m = mode.element.text.strip()
try:
file_mode += self.open_mode_lookup[m.strip()]
except KeyError:
todo.append("'%s'" % m.strip())
if self.access_length is not None:
todo.append("Access length is not supported (%s)" %
self.access_length.renderAsCode())
if todo:
todo_warning = self.getWarning(
"UnknownFileMode", ", ".join(todo), indent, crlf=1)
else:
todo_warning = ""
#
return "%s%sVBFiles.openFile(%s, %s, '%s')\n" % (
todo_warning,
self.getIndent(indent),
self.channel.renderAsCode(),
self.filename.renderAsCode(),
file_mode)
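# Illustrative example: `Open "data.txt" For Input As #1` renders as
#     VBFiles.openFile(1, 'data.txt', 'r')
# via the mode lookup table above; unrecognised modes are collected into
# an UnknownFileMode warning instead of failing.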
class VBClose(VBCodeBlock):
"""Represents a close statement"""
def __init__(self, scope="Private"):
"""Initialize the open"""
super(VBClose, self).__init__(scope)
#
self.channels = []
#
self.auto_class_handlers = ({
"expression": (VBParExpression, self.channels),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
if not self.channels:
return "%sVBFiles.closeFile()\n" % (
self.getIndent(indent))
else:
ret = []
for channel in self.channels:
ret.append("%sVBFiles.closeFile(%s)\n" % (
self.getIndent(indent),
channel.renderAsCode()))
return "".join(ret)
class VBSeek(VBCodeBlock):
"""Represents a seek statement"""
def __init__(self, scope="Private"):
"""Initialize the seek"""
super(VBSeek, self).__init__(scope)
#
self.expressions = []
#
self.auto_class_handlers = ({
"expression": (VBParExpression, self.expressions),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%sVBFiles.seekFile(%s, %s)\n" % (
self.getIndent(indent),
self.expressions[0].renderAsCode(),
self.expressions[1].renderAsCode(),)
class VBInput(VBCodeBlock):
"""Represents an input statement"""
input_type = "Input"
def __init__(self, scope="Private"):
"""Initialize the open"""
super(VBInput, self).__init__(scope)
#
self.channel = None
self.variables = []
#
self.auto_class_handlers = ({
"channel_id": (VBParExpression, "channel"),
"expression": (VBExpression, self.variables),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
# Make sure variables are converted as if they are on the LHS of an
# assignment
for var in self.variables:
var.brackets_are_indexes = 1
#
return "%s%s = VBFiles.get%s(%s, %d)\n" % (
self.getIndent(indent),
", ".join([var.renderAsCode() for var in self.variables]),
self.input_type,
self.channel.renderAsCode(),
len(self.variables))
class VBLineInput(VBInput):
"""Represents an input statement"""
input_type = "LineInput"
class VBPrint(VBCodeBlock):
"""Represents a print statement"""
def __init__(self, scope="Private"):
"""Initialize the print"""
super(VBPrint, self).__init__(scope)
#
self.channel = VBRenderDirect("None")
self.variables = []
self.hold_cr = None
#
self.auto_class_handlers = ({
"channel_id": (VBParExpression, "channel"),
"expression": (VBExpression, self.variables),
"print_separator": (VBPrintSeparator, self.variables),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
print_list = ", ".join([var.renderAsCode()
for var in self.variables if var.renderAsCode()])
if self.variables:
if self.variables[-1].renderAsCode() not in (None, "\t"):
print_list += ", '\\n'"
return "%sVBFiles.writeText(%s, %s)\n" % (
self.getIndent(indent),
self.channel.renderAsCode(),
print_list)
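# Illustrative example: `Print #1, "a", "b"` renders roughly as
#     VBFiles.writeText(1, 'a', "\t", 'b', '\n')
# - the `,` separator becomes a tab, and a trailing `;` (which renders as
# None and is filtered out) suppresses the closing newline.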
class VBPrintSeparator(VBConsumer):
"""Represents a print statement separator"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
if self.element.text == ";":
return None
elif self.element.text == ",":
return '"\\t"'
else:
raise UnhandledStructureError(
"Unknown print separator '%s'" % self.element.text)
class VBName(VBCodeBlock):
"""Represents a name statement"""
def __init__(self, scope="Private"):
"""Initialize the print"""
super(VBName, self).__init__(scope)
#
self.channel = VBRenderDirect("None")
self.files = []
#
self.auto_class_handlers = ({
"expression": (VBExpression, self.files),
})
def renderAsCode(self, indent=0):
"""Render this element as code"""
self.registerImportRequired("os")
file_list = ", ".join([fle.renderAsCode() for fle in self.files])
return "%sName(%s)\n" % (
self.getIndent(indent),
file_list)
class VBUserType(VBCodeBlock):
"""Represents a select block"""
auto_handlers = [
]
select_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBUserType, self).__init__(scope)
#
self.variables = []
self.identifier = None
#
self.auto_class_handlers = {
"identifier": (VBConsumer, "identifier"),
"object_definition": (VBVariable, self.variables),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
vars = []
if not self.variables:
vars.append(VBPass().renderAsCode(indent + 2))
else:
for var in self.variables:
vars.append("%sself.%s = %s()" % (
self.getIndent(indent + 2),
var.identifier,
var.type))
#
return ("%sclass %s:\n"
"%sdef __init__(self):\n%s\n\n" % (
self.getIndent(indent),
self.identifier.element.text,
self.getIndent(indent + 1),
"\n".join(vars)))
class VBSubroutine(VBCodeBlock):
"""Represents a subroutine"""
public_is_global = 0 # Public objects defined here will not be globals
def __init__(self, scope="Private"):
"""Initialize the subroutine"""
super(VBSubroutine, self).__init__(scope)
self.identifier = None
self.scope = scope
self.block = VBPass()
self.parameters = []
        # A list of objects required in a global statement
        self.globals_required = {}
self.type = None
self.static = None
#
self.auto_class_handlers.update({
"formal_param": (VBVariable, self.parameters),
"block": (VBCodeBlock, "block"),
"type_definition": (VBUnrendered, "type"),
})
self.auto_handlers = [
"identifier",
"scope",
"static",
]
self.skip_handlers = [
"sub_definition",
]
self.rendering_locals = 0
def renderAsCode(self, indent=0):
"""Render this subroutine"""
code_block = self.block.renderAsCode(indent + 1)
locals = [declaration.renderAsCode(indent + 1)
for declaration in self.block.locals]
if self.static:
log.warn("Static function detected - static is not supported")
ret = "\n%sdef %s(%s):\n%s%s%s" % (
self.getIndent(indent),
self.getParentProperty("enforcePrivateName")(self),
self.renderParameters(),
self.renderGlobalStatement(indent + 1),
"\n".join(locals),
code_block)
return ret
def renderParameters(self):
"""Render the parameter list"""
params = [param.renderAsCode() for param in self.parameters]
if self.getParentProperty("convert_functions_to_methods"):
params.insert(0, "self")
return ", ".join(params)
def resolveLocalName(self, name, rendering_locals=0, requestedby=None):
"""Convert a local name to a fully resolved name
We search our local variables and parameters to see if we know the
name. If we do then we return the original name.
"""
names = [obj.identifier for obj in self.block.locals + self.parameters]
if name in names:
return name
else:
raise UnresolvableName(
"Name '%s' is not known in this namespace" % name)
def renderGlobalStatement(self, indent=0):
"""Render the global statement if we need it"""
if self.globals_required:
return "%sglobal %s\n" % (self.getIndent(indent),
", ".join(self.globals_required.keys()))
else:
return ""
def assignParent(self, *args, **kw):
"""Assign our parent
We can use this opportunity to now determine if we are a global
"""
super(VBSubroutine, self).assignParent(*args, **kw)
#
# Check if we will be considered a global for the project
if hasattr(self, "parent"):
if self.parent.amGlobal(self.scope):
self.registerAsGlobal()
class VBFunction(VBSubroutine):
"""Represents a function"""
is_function = 1 # We need () if we are accessed directly
def renderAsCode(self, indent=0):
"""Render this subroutine"""
#
# Set a name conversion to capture the function name
# Assignments to this function name should go to the _ret parameter
return_var = Config["Functions", "ReturnVariableName"]
self.name_substitution[self.identifier] = return_var
#
if self.block:
block = self.block.renderAsCode(indent + 1)
else:
block = self.getIndent(indent + 1) + "pass\n"
#
locals = [declaration.renderAsCode(indent + 1)
for declaration in self.block.locals]
#
if Config["Functions", "PreInitializeReturnVariable"] == "Yes":
pre_init = "%s%s = None\n" % (
self.getIndent(indent + 1),
return_var)
else:
pre_init = ""
ret = "\n%sdef %s(%s):\n%s%s%s%s%sreturn %s\n" % (
self.getIndent(indent),
self.getParentProperty("enforcePrivateName")(self),
self.renderParameters(),
self.renderGlobalStatement(indent + 1),
pre_init,
"\n".join(locals),
block,
self.getIndent(indent + 1),
return_var)
return ret
class VBIf(VBCodeBlock):
"""Represents an if block"""
auto_handlers = [
]
skip_handlers = [
"if_statement",
]
def __init__(self, scope="Private"):
"""Initialize the If"""
super(VBIf, self).__init__(scope)
#
self.condition = None
self.if_block = VBPass()
self.elif_blocks = []
self.else_block = None
#
self.auto_class_handlers = {
"condition": (VBExpression, "condition"),
"if_block": (VBCodeBlock, "if_block"),
"else_if_statement": (VBElseIf, self.elif_blocks),
"else_block": (VBCodeBlock, "else_block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
ret = self.getIndent(indent) + \
"if %s:\n" % self.condition.renderAsCode()
ret += self.if_block.renderAsCode(indent + 1)
if self.elif_blocks:
for elif_block in self.elif_blocks:
ret += elif_block.renderAsCode(indent)
if self.else_block:
ret += self.getIndent(indent) + "else:\n"
ret += self.else_block.renderAsCode(indent + 1)
return ret
class VBElseIf(VBIf):
"""Represents an ElseIf statement"""
def __init__(self, scope="Private"):
"""Initialize the If"""
super(VBIf, self).__init__(scope)
#
self.condition = None
self.elif_block = VBPass()
#
self.auto_class_handlers = {
"condition": (VBExpression, "condition"),
"else_if_block": (VBCodeBlock, "elif_block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
ret = self.getIndent(indent) + \
"elif %s:\n" % self.condition.renderAsCode()
ret += self.elif_block.renderAsCode(indent + 1)
return ret
class VBInlineIf(VBCodeBlock):
"""Represents an if block"""
auto_handlers = [
]
skip_handlers = [
"if_statement",
]
def __init__(self, scope="Private"):
"""Initialize the If"""
super(VBInlineIf, self).__init__(scope)
#
self.condition = None
self.statements = []
#
self.auto_class_handlers = {
"condition": (VBExpression, "condition"),
"statement": (VBCodeBlock, self.statements),
"inline_implicit_call": (VBCodeBlock, self.statements), # TODO: remove me
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
assert self.statements, "Inline If has no statements!"
ret = "%sif %s:\n%s" % (
self.getIndent(indent),
self.condition.renderAsCode(),
self.statements[0].renderAsCode(indent + 1),)
#
if len(self.statements) == 2:
ret += "%selse:\n%s" % (
self.getIndent(indent),
self.statements[1].renderAsCode(indent + 1))
elif len(self.statements) > 2:
raise VBParserError(
"Inline if with more than one clause not supported")
#
return ret
class VBSelect(VBCodeBlock):
"""Represents a select block"""
auto_handlers = [
]
_select_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBSelect, self).__init__(scope)
#
self.blocks = []
self.comment_block = VBNothing()
#
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"case_item_block": (VBCaseItem, self.blocks),
"case_else_block": (VBCaseElse, self.blocks),
"case_comment_block": (VBOptionalCodeBlock, "comment_block"),
}
#
# Change the variable index if we are a select
if self.__class__ == VBSelect:
self.select_variable_index = VBSelect._select_variable_index
VBSelect._select_variable_index = VBSelect._select_variable_index + 1
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Change if/elif status on the first child
if self.blocks:
self.blocks[0].if_or_elif = "if"
#
if Config["Select", "EvaluateVariable"] != "EachTime":
ret = "%s%s = %s\n" % (self.getIndent(indent),
self.getSelectVariable(),
self.expression.renderAsCode())
else:
ret = ""
ret += self.comment_block.renderAsCode()
ret += "".join([item.renderAsCode(indent) for item in self.blocks])
return ret
def getSelectVariable(self):
"""Return the name of the select variable"""
eval_variable = Config["Select", "EvaluateVariable"]
if eval_variable == "Once":
if Config["Select", "UseNumericIndex"] == "Yes":
select_var = "%s%d" % (Config["Select", "SelectVariablePrefix"],
self.getParentProperty("select_variable_index"))
else:
select_var = Config["Select", "SelectVariablePrefix"]
elif eval_variable == "EachTime":
select_var = "%s" % self.getParentProperty(
"expression").renderAsCode()
else:
raise InvalidOption(
"Evaluate variable option not understood: '%s'" % eval_variable)
return select_var
class VBCaseBlock(VBSelect):
"""Represents a select block"""
if_or_elif = "elif" # Our parent will change this if we are the first
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBCaseBlock, self).__init__(scope)
#
self.lists = []
self.expressions = []
self.block = VBPass()
#
self.auto_class_handlers = {
"case_list": (VBCaseItem, self.lists),
"expression": (VBExpression, self.expressions),
"block": (VBCodeBlock, "block"),
}
class VBCaseItem(VBCaseBlock):
"""Represents a select block"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
select_variable_index = self.getParentProperty("select_variable_index")
if self.lists:
expr = " or ".join(["(%s)" % item.renderAsCode()
for item in self.lists])
return "%s%s %s:\n%s" % (
self.getIndent(indent),
self.if_or_elif,
expr,
self.block.renderAsCode(indent + 1))
elif len(self.expressions) == 1:
expression_text = self.expressions[0].renderAsCode()
# Now check for "Is"
if expression_text.startswith("Is "):
# This has "Is" - replace it and use the rest of the expression
return "%s %s" % (
self.getSelectVariable(),
expression_text[3:])
else:
# Standard case
return "%s == %s" % (
self.getSelectVariable(),
expression_text)
elif len(self.expressions) == 2:
return "%s <= %s <= %s" % (
self.expressions[0].renderAsCode(),
self.getSelectVariable(),
self.expressions[1].renderAsCode())
raise VBParserError("Error rendering case item")
class VBCaseElse(VBCaseBlock):
"""Represents a select block"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%selse:\n%s" % (self.getIndent(indent),
self.block.renderAsCode(indent + 1))
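# Worked example (illustrative, assuming EvaluateVariable=Once,
# UseNumericIndex=Yes and a `_select` prefix):
#     Select Case x              _select0 = x
#     Case 1, 2          ->      if (_select0 == 1) or (_select0 == 2): ...
#     Case 3 To 5                elif 3 <= _select0 <= 5: ...
#     Case Else                  else: ...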
class VBFor(VBCodeBlock):
"""Represents a for statement"""
_for_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBFor, self).__init__(scope)
#
self.block = VBPass()
self.expressions = []
#
self.auto_class_handlers = {
"expression": (VBExpression, self.expressions),
"block": (VBCodeBlock, "block"), # Used for full 'for'
"body": (VBCodeBlock, "block"), # Used for inline 'for'
"object": (VBObject, "object"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
range_statement = ", ".join(
[item.renderAsCode() for item in self.expressions])
# Watch out for the weird dotted name in the for
self.handleDottedName(indent)
return "%sfor %s in vbForRange(%s):\n%s%s" % (
self.getIndent(indent),
self.loopname,
range_statement,
self.copiedname,
self.block.renderAsCode(indent + 1))
def handleDottedName(self, indent):
"""Handle a dotted name as the identifier
The For can reference a dotted name, which presumably changes the
value of that attribute. We can only do this by a local re-assignment
"""
name = self.object.renderAsCode()
if "." not in name:
# Ok, normal case
self.loopname = name
self.copiedname = ""
else:
# Ooops, assigning to a dotted name in the loop
self.loopname = "_idx%s" % VBFor._for_variable_index
VBFor._for_variable_index += 1
self.copiedname = "%s%s = %s\n" % (
self.getIndent(indent + 1),
name,
self.loopname
)
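# Illustrative example: `For obj.idx = 1 To 10` cannot assign to a dotted
# name in a Python for-statement, so it renders roughly as
#     for _idx0 in vbForRange(1, 10):
#         obj.idx = _idx0
#         ...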
class VBForEach(VBFor):
"""Represents a for each statement"""
def renderAsCode(self, indent=0):
"""Render this element as code"""
# Watch out for the weird dotted name in the for
self.handleDottedName(indent)
return "%sfor %s in %s:\n%s%s" % (
self.getIndent(indent),
self.loopname,
self.expressions[0].renderAsCode(),
self.copiedname,
self.block.renderAsCode(indent + 1))
class VBWhile(VBCodeBlock):
"""Represents a while statement"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBWhile, self).__init__(scope)
#
self.block = VBPass()
self.expression = None
#
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"block": (VBCodeBlock, "block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code"""
return "%swhile %s:\n%s" % (
self.getIndent(indent),
self.expression.renderAsCode(),
self.block.renderAsCode(indent + 1))
class VBDo(VBCodeBlock):
"""Represents a do statement"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBDo, self).__init__(scope)
#
self.block = VBPass()
self.pre_while = None
self.pre_until = None
self.post_while = None
self.post_until = None
#
self.auto_class_handlers = {
"while_clause": (VBExpression, "pre_while"),
"until_clause": (VBExpression, "pre_until"),
"post_while_clause": (VBExpression, "post_while"),
"post_until_clause": (VBExpression, "post_until"),
"block": (VBCodeBlock, "block"),
}
def renderAsCode(self, indent=0):
"""Render this element as code
There are five different kinds of do loop
pre_while
pre_until
post_while
post_until
no conditions
"""
if self.pre_while:
return "%swhile %s:\n%s" % (
self.getIndent(indent),
self.pre_while.renderAsCode(),
self.block.renderAsCode(indent + 1))
elif self.pre_until:
return "%swhile not (%s):\n%s" % (
self.getIndent(indent),
self.pre_until.renderAsCode(),
self.block.renderAsCode(indent + 1))
elif self.post_while:
return "%swhile 1:\n%s%sif not (%s):\n%sbreak\n" % (
self.getIndent(indent),
self.block.renderAsCode(indent + 1),
self.getIndent(indent + 1),
self.post_while.renderAsCode(),
self.getIndent(indent + 2))
elif self.post_until:
return "%swhile 1:\n%s%sif %s:\n%sbreak\n" % (
self.getIndent(indent),
self.block.renderAsCode(indent + 1),
self.getIndent(indent + 1),
self.post_until.renderAsCode(),
self.getIndent(indent + 2))
else:
return "%swhile 1:\n%s" % (
self.getIndent(indent),
self.block.renderAsCode(indent + 1))
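# Illustrative example of the bottom-tested form: `Do ... Loop Until x > 3`
# renders as
#     while 1:
#         ...
#         if x > 3:
#             break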
class VBWith(VBCodeBlock):
"""Represents a with statement"""
_with_variable_index = 0
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBWith, self).__init__(scope)
#
self.block = None
self.expression = None
#
self.auto_class_handlers = {
"expression": (VBExpression, "expression"),
"block": (VBCodeBlock, "block"),
}
#
self.with_variable_index = VBWith._with_variable_index
VBWith._with_variable_index = VBWith._with_variable_index + 1
def renderAsCode(self, indent=0):
"""Render this element as code"""
#
# Don't even do anything if there is no body to the With
if self.block:
#
# Before we render the expression we change its parent to our parent because
# we don't want any ".implicit" objects to be evaluated using our
# With object
self.expression.parent = self.parent
#
if self._evaluateVariableOption() == "EveryTime":
self.with_object = self.expression.renderAsCode()
return self.block.renderAsCode(indent)
else:
if self.checkOptionYesNo("With", "UseNumericIndex") == "Yes":
varname = "%s%d" % (
Config["With", "WithVariablePrefix"],
self.with_variable_index)
else:
varname = Config["With", "WithVariablePrefix"]
self.with_object = varname
return "%s%s = %s\n%s" % (
self.getIndent(indent),
varname,
self.expression.renderAsCode(),
self.block.renderAsCode(indent))
else:
return ""
def _evaluateVariableOption(self):
return self.checkOptionChoice(
"With", "EvaluateVariable", ("EveryTime", "Once"))
class VBProperty(VBSubroutine):
"""Represents a property definition"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBProperty, self).__init__(scope)
self.property_decorator_type = None
#
self.auto_handlers.append("property_decorator_type")
def renderPropertyGroup(self, indent, name, Let=None, Set=None, Get=None):
"""Render a group of property statements"""
if Let and Set:
raise UnhandledStructureError(
"Cannot handle both Let and Set properties for an object")
log.info("Rendering property group '%s'" % name)
ret = []
params = []
pset = Let or Set
pget = Get
#
# Get the name for this property - respecting the hidden status
obj = pset or pget # Need at least one!
proper_name = self.getParentProperty("enforcePrivateName")(obj)
if pset:
self.getParentProperty("local_names").append(
pset.identifier) # Store property name for namespace analysis
pset.identifier = "%s%s" % (
Config["Properties", "LetSetVariablePrefix"], pset.identifier)
ret.append(pset.renderAsCode(indent))
params.append("fset=%s" %
self.getParentProperty("enforcePrivateName")(pset))
if pget:
self.getParentProperty("local_names").append(
pget.identifier) # Store property name for namespace analysis
pget.__class__ = VBFunction # Needs to be a function
pget.name_substitution[pget.identifier] = Config[
"Functions", "ReturnVariableName"]
pget.identifier = "%s%s" % (
Config["Properties", "GetVariablePrefix"], pget.identifier)
ret.append(pget.renderAsCode(indent))
params.append("fget=%s" %
self.getParentProperty("enforcePrivateName")(pget))
return "\n%s%s%s = property(%s)\n" % (
"".join(ret),
self.getIndent(indent),
proper_name,
", ".join(params))
class VBEnum(VBCodeBlock):
"""Represents an enum definition"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBEnum, self).__init__(scope)
self.enumerations = []
self.identifier = None
#
self.auto_class_handlers = {
"enumeration_item": (VBEnumItem, self.enumerations),
}
self.auto_handlers = ["identifier"]
def renderAsCode(self, indent=0):
"""Render a group of property statements"""
count = 0
ret = []
for enumeration in self.enumerations:
if enumeration.expression:
cnt = enumeration.expression.renderAsCode()
else:
cnt = count
count += 1
ret.append("%s%s = %s" % (self.getIndent(indent),
enumeration.identifier.element.text,
cnt))
return "%s# Enumeration '%s'\n%s\n" % (
self.getIndent(indent),
self.identifier,
"\n".join(ret),
)
class VBEnumItem(VBCodeBlock):
"""Represents an enum item"""
def __init__(self, scope="Private"):
"""Initialize the Select"""
super(VBEnumItem, self).__init__(scope)
self.identifier = None
self.expression = None
#
self.auto_class_handlers = {
"identifier": (VBConsumer, "identifier"),
"expression": (VBExpression, "expression"),
}
class VB2PYDirective(VBCodeBlock):
"""Handles a vb2py directive"""
skip_handlers = [
"vb2py_directive",
]
would_end_docstring = 0
def __init__(self, scope="Private"):
"""Initialize the module"""
super(VB2PYDirective, self).__init__(scope)
self.auto_handlers = (
"directive_type",
"config_name",
"config_section",
"expression",
)
self.directive_type = "Set"
self.config_name = None
self.config_section = None
self.expression = None
def renderAsCode(self, indent=0):
"""We use the rendering to do our stuff"""
if self.directive_type == "Set":
Config.setLocalOveride(
self.config_section, self.config_name, self.expression)
log.info("Doing a set: %s" %
str((self.config_section, self.config_name, self.expression)))
elif self.directive_type == "Unset":
Config.removeLocalOveride(self.config_section, self.config_name)
log.info("Doing an uset: %s" %
str((self.config_section, self.config_name)))
elif self.directive_type in ("GlobalSet", "GlobalAdd"):
pass # already handled this
elif self.directive_type == "Add":
Config.addLocalOveride(
self.config_section, self.config_name, self.expression)
log.info("Adding a setting: %s" %
str((self.config_section, self.config_name, self.expression)))
else:
raise DirectiveError(
"Directive not understood: '%s'" % self.directive_type)
return ""
def assignParent(self, *args, **kw):
"""Assign our parent
We can use this opportunity to now determine if we are a global
"""
super(VB2PYDirective, self).assignParent(*args, **kw)
#
        # Check if we are a global level option - if so we set it now
if self.directive_type == "GlobalSet":
Config.setLocalOveride(
self.config_section, self.config_name, self.expression)
elif self.directive_type == "GlobalAdd":
Config.addLocalOveride(
self.config_section, self.config_name, self.expression)
class VBPass(VBCodeBlock):
"""Represents an empty statement"""
def renderAsCode(self, indent=0):
"""Render it!"""
return "%spass\n" % (self.getIndent(indent),)
class VBRenderDirect(VBCodeBlock):
"""Represents a pre-rendered statement"""
def __init__(self, text, indent=0, crlf=0):
"""Initialize"""
super(VBRenderDirect, self).__init__()
self.identifier = text
self.indent = indent
self.crlf = crlf
def renderAsCode(self, indent=0):
"""Render it!"""
s = ""
if self.indent:
s += self.getIndent(indent)
s += self.identifier
if self.crlf:
s += "\n"
return s
def asString(self):
"""Return string representation"""
return self.identifier
class VBNothing(VBCodeBlock):
"""Represents a block which renders to nothing at all"""
def renderAsCode(self, indent=0):
"""Render it!"""
return ""
class VBParserFailure(VBConsumer):
"""Represents a block which failed to parse"""
def renderAsCode(self, indent=0):
"""Render it!"""
fail_option = Config["General", "InsertIntoFailedCode"].lower()
warn = self.getWarning("ParserError", self.element.text, indent, crlf=1)
warn += self.getWarning(
"ParserStop", "Conversion of VB code halted", indent, crlf=1)
indentation = self.getIndent(indent)
message = 'VB2PY Code conversion failed at this point'
if fail_option == "exception":
warn += "%sraise NotImplemented('%s')" % (indentation, message)
elif fail_option == "warning":
warn += "%simport warnings;warnings.warn('%s')" % (indentation, message)
#
return warn
# FIXME: Circular import!
from vb2py.vbparser import *
# Blocks which do not contain valid statements
# If a block contains only these then it needs a pass
# statement to be a valid Python suite
NonCodeBlocks = (VBComment, VBUnrendered, VB2PYDirective) | return the first one that has it. If there are none then we just use the name
|
bit_writer.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"encoding/binary"
"io"
"log"
"github.com/apache/arrow/go/v7/arrow/bitutil"
)
// WriterAtBuffer is a convenience struct for providing a WriteAt function
// to a byte slice for use with things that want an io.WriterAt
type WriterAtBuffer struct {
buf []byte
}
// NewWriterAtBuffer returns an object which fulfills the io.WriterAt interface
// by taking ownership of the passed in slice.
func NewWriterAtBuffer(buf []byte) WriterAtWithLen |
// Len returns the length of the underlying byte slice.
func (w *WriterAtBuffer) Len() int {
return len(w.buf)
}
// WriteAt fulfills the io.WriterAt interface to write len(p) bytes from p
// to the underlying byte slice starting at offset off. It returns the number
// of bytes written from p (0 <= n <= len(p)) and any error encountered.
func (w *WriterAtBuffer) WriteAt(p []byte, off int64) (n int, err error) {
if off > int64(len(w.buf)) {
return 0, io.ErrUnexpectedEOF
}
n = copy(w.buf[off:], p)
if n < len(p) {
err = io.ErrUnexpectedEOF
}
return
}
// WriterAtWithLen is an interface for an io.WriterAt with a Len function
type WriterAtWithLen interface {
io.WriterAt
Len() int
}
// BitWriter is a utility for writing values of specific bit widths to a stream
// using a uint64 as a buffer to build up between flushing for efficiency.
type BitWriter struct {
wr io.WriterAt
buffer uint64
byteoffset int
bitoffset uint
raw [8]byte
}
// NewBitWriter initializes a new bit writer to write to the passed in interface
// using WriteAt to write the appropriate offsets and values.
func NewBitWriter(w io.WriterAt) *BitWriter {
return &BitWriter{wr: w}
}
// ReserveBytes reserves the next aligned nbytes, skipping them and returning
// the offset to use with WriteAt to write to those reserved bytes. Used for
// RLE encoding to fill in the indicators after encoding.
func (b *BitWriter) ReserveBytes(nbytes int) int {
b.Flush(true)
ret := b.byteoffset
b.byteoffset += nbytes
return ret
}
// WriteAt fulfills the io.WriterAt interface to write len(p) bytes from p
// to the underlying byte slice starting at offset off. It returns the number
// of bytes written from p (0 <= n <= len(p)) and any error encountered.
// This allows writing full bytes directly to the underlying writer.
func (b *BitWriter) WriteAt(val []byte, off int64) (int, error) {
return b.wr.WriteAt(val, off)
}
// Written returns the number of bytes that have been written to the BitWriter,
// not how many bytes have been flushed. Use Flush to ensure that all data is flushed
// to the underlying writer.
func (b *BitWriter) Written() int {
return b.byteoffset + int(bitutil.BytesForBits(int64(b.bitoffset)))
}
// WriteValue appends the low nbits of v to the bit-packed stream, returning
// an error if flushing the internal buffer to the underlying writer fails.
func (b *BitWriter) WriteValue(v uint64, nbits uint) error {
b.buffer |= v << b.bitoffset
b.bitoffset += nbits
if b.bitoffset >= 64 {
binary.LittleEndian.PutUint64(b.raw[:], b.buffer)
if _, err := b.wr.WriteAt(b.raw[:], int64(b.byteoffset)); err != nil {
return err
}
b.buffer = 0
b.byteoffset += 8
b.bitoffset -= 64
b.buffer = v >> (nbits - b.bitoffset)
}
return nil
}
// Flush will flush any buffered data to the underlying writer, pass true if
// the next write should be byte-aligned after this flush.
func (b *BitWriter) Flush(align bool) {
var nbytes int64
if b.bitoffset > 0 {
nbytes = bitutil.BytesForBits(int64(b.bitoffset))
binary.LittleEndian.PutUint64(b.raw[:], b.buffer)
b.wr.WriteAt(b.raw[:nbytes], int64(b.byteoffset))
}
if align {
b.buffer = 0
b.byteoffset += int(nbytes)
b.bitoffset = 0
}
}
// WriteAligned writes the value val as a little endian value in exactly nbytes
// byte-aligned to the underlying writer, flushing via Flush(true) before writing nbytes
// without buffering.
func (b *BitWriter) WriteAligned(val uint64, nbytes int) bool {
b.Flush(true)
binary.LittleEndian.PutUint64(b.raw[:], val)
if _, err := b.wr.WriteAt(b.raw[:nbytes], int64(b.byteoffset)); err != nil {
log.Println(err)
return false
}
b.byteoffset += nbytes
return true
}
// WriteVlqInt writes v as a vlq encoded integer byte-aligned to the underlying writer
// without buffering.
func (b *BitWriter) WriteVlqInt(v uint64) bool {
b.Flush(true)
var buf [binary.MaxVarintLen64]byte
nbytes := binary.PutUvarint(buf[:], v)
if _, err := b.wr.WriteAt(buf[:nbytes], int64(b.byteoffset)); err != nil {
log.Println(err)
return false
}
b.byteoffset += nbytes
return true
}
// WriteZigZagVlqInt writes a zigzag encoded integer byte-aligned to the underlying writer
// without buffering.
func (b *BitWriter) WriteZigZagVlqInt(v int64) bool {
return b.WriteVlqInt(uint64((v << 1) ^ (v >> 63)))
}
// Clear resets the writer so that subsequent writes will start from offset 0,
// allowing reuse of the underlying buffer and writer.
func (b *BitWriter) Clear() {
b.byteoffset = 0
b.bitoffset = 0
b.buffer = 0
}
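// Example (illustrative sketch, not part of the original file): packing two
// small values into a byte-backed writer.
//
//	buf := make([]byte, 8)
//	bw := NewBitWriter(NewWriterAtBuffer(buf))
//	bw.WriteValue(5, 3) // low 3 bits: 0b101
//	bw.WriteValue(1, 1) // next bit, giving 0b1101 in the buffer
//	bw.Flush(true)      // buf[0] == 0x0d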
| {
return &WriterAtBuffer{buf}
} |
batch.py | # Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.actions import BaseAction
from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import local_session, type_schema
@resources.register('batch-compute')
class ComputeEnvironment(QueryResourceManager):
class resource_type(object):
service = 'batch'
filter_name = 'computeEnvironments'
filter_type = 'list'
dimension = None
id = name = "computeEnvironmentName"
enum_spec = (
'describe_compute_environments', 'computeEnvironments', None)
@ComputeEnvironment.filter_registry.register('security-group')
class ComputeSGFilter(SecurityGroupFilter):
RelatedIdsExpression = "computeResources.securityGroupIds"
@ComputeEnvironment.filter_registry.register('subnet')
class ComputeSubnetFilter(SubnetFilter):
RelatedIdsExpression = "computeResources.subnets"
@resources.register('batch-definition')
class JobDefinition(QueryResourceManager):
class resource_type(object):
service = 'batch'
filter_name = 'jobDefinitions'
filter_type = 'list'
dimension = None
id = name = "jobDefinitionName"
enum_spec = (
'describe_job_definitions', 'jobDefinitions', None)
class StateTransitionFilter(object):
"""Filter resources by state.
Try to simplify construction for policy authors by automatically
filtering elements (filters or actions) to the resource states
they are valid for.
"""
valid_origin_states = ()
def filter_resource_state(self, resources, key, states=None):
states = states or self.valid_origin_states
if not states:
return resources
orig_length = len(resources)
results = [r for r in resources if r[key] in states]
if orig_length != len(results):
self.log.warning(
"%s implicitly filtered %d of %d resources with valid %s" % (
self.__class__.__name__,
len(results), orig_length, key.lower()))
return results
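# Illustrative sketch (not part of the original module): with
# valid_origin_states = ('DISABLED',), a resource list such as
#   [{'state': 'ENABLED'}, {'state': 'DISABLED'}]
# is implicitly narrowed to the single DISABLED resource, and the warning
# above reports that 1 of 2 resources was filtered.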
@ComputeEnvironment.action_registry.register('update-environment')
class UpdateComputeEnvironment(BaseAction, StateTransitionFilter):
"""Updates an AWS batch compute environment
:example:
.. code-block:: yaml
policies:
- name: update-environments
resource: batch-compute
filters:
- computeResources.desiredvCpus: 0
- state: ENABLED
actions:
- type: update-environment
state: DISABLED
"""
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['update-environment']},
'computeEnvironment': {'type': 'string'},
'state': {'type': 'string', 'enum': ['ENABLED', 'DISABLED']},
'computeResources': {
'type': 'object',
'additionalProperties': False,
'properties': {
'minvCpus': {'type': 'integer'},
'maxvCpus': {'type': 'integer'},
'desiredvCpus': {'type': 'integer'}
}
},
'serviceRole': {'type': 'string'}
}
}
permissions = ('batch:UpdateComputeEnvironment',)
valid_origin_status = ('VALID', 'INVALID')
def process(self, resources):
resources = self.filter_resource_state(
resources, 'status', self.valid_origin_status)
client = local_session(self.manager.session_factory).client('batch')
params = dict(self.data)
params.pop('type')
for r in resources:
params['computeEnvironment'] = r['computeEnvironmentName']
client.update_compute_environment(**params)
@ComputeEnvironment.action_registry.register('delete')
class DeleteComputeEnvironment(BaseAction, StateTransitionFilter):
"""Delete an AWS batch compute environment
:example:
.. code-block:: yaml
policies:
- name: delete-environments
resource: batch-compute
filters:
- computeResources.desiredvCpus: 0
actions:
- type: delete
"""
schema = type_schema('delete')
permissions = ('batch:DeleteComputeEnvironment',)
valid_origin_states = ('DISABLED',)
valid_origin_status = ('VALID', 'INVALID')
def delete_environment(self, r):
client = local_session(self.manager.session_factory).client('batch')
client.delete_compute_environment(
computeEnvironment=r['computeEnvironmentName'])
def process(self, resources):
resources = self.filter_resource_state(
self.filter_resource_state(
resources, 'state', self.valid_origin_states),
'status', self.valid_origin_status)
with self.executor_factory(max_workers=2) as w:
list(w.map(self.delete_environment, resources))
@JobDefinition.action_registry.register('deregister')
class DefinitionDeregister(BaseAction, StateTransitionFilter):
"""Deregisters a batch definition
:example:
.. code-block:: yaml
policies:
- name: deregister-definition
resource: batch-definition
filters:
- containerProperties.image: amazonlinux
actions:
- type: deregister
"""
schema = type_schema('deregister')
permissions = ('batch:DeregisterJobDefinition',)
valid_origin_states = ('ACTIVE',)
def deregister_definition(self, r):
self.client.deregister_job_definition(
jobDefinition='%s:%s' % (r['jobDefinitionName'],
r['revision']))
def | (self, resources):
resources = self.filter_resource_state(
resources, 'status', self.valid_origin_states)
self.client = local_session(
self.manager.session_factory).client('batch')
with self.executor_factory(max_workers=2) as w:
list(w.map(self.deregister_definition, resources))
| process |
sg_tests.go | package main
import (
"context"
"flag"
"os"
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
"github.com/sourcegraph/sourcegraph/lib/output"
)
var (
testFlagSet = flag.NewFlagSet("sg test", flag.ExitOnError)
testCommand = &ffcli.Command{
Name: "test",
ShortUsage: "sg test <testsuite>",
ShortHelp: "Run the given test suite.",
LongHelp: "Run the given test suite.",
FlagSet: testFlagSet,
Exec: testExec,
}
)
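// Usage sketch: `sg test <suite> [args...]` looks the suite up in the loaded
// sg configuration (globalConf.Tests) and passes any remaining arguments on
// to run.Test.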
func testExec(ctx context.Context, args []string) error | {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "No test suite specified"))
return flag.ErrHelp
}
cmd, ok := globalConf.Tests[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: test suite %q not found :(", args[0]))
return flag.ErrHelp
}
return run.Test(ctx, cmd, args[1:], globalConf.Env)
} |
|
aedes.js | 'use strict'
const EE = require('events').EventEmitter
const util = require('util')
const parallel = require('fastparallel')
const series = require('fastseries')
const uuidv4 = require('uuid/v4')
const bulk = require('bulk-write-stream')
const reusify = require('reusify')
const { pipeline } = require('readable-stream')
const Packet = require('aedes-packet')
const memory = require('aedes-persistence')
const mqemitter = require('mqemitter')
const Client = require('./lib/client')
module.exports = Aedes.Server = Aedes
const defaultOptions = {
concurrency: 100,
heartbeatInterval: 60000, // 1 minute
connectTimeout: 30000, // 30 secs
decodeProtocol: null,
preConnect: defaultPreConnect,
authenticate: defaultAuthenticate,
authorizePublish: defaultAuthorizePublish,
authorizeSubscribe: defaultAuthorizeSubscribe,
authorizeForward: defaultAuthorizeForward,
published: defaultPublished,
trustProxy: false,
trustedProxies: [],
queueLimit: 42
}
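// Example (sketch): any of the defaults above can be overridden at
// construction time, e.g.
//
//   const broker = Aedes({ concurrency: 50, heartbeatInterval: 30000 })
//
// Keys left unspecified keep their defaultOptions values via Object.assign.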
function Aedes (opts) {
const that = this
if (!(this instanceof Aedes)) {
return new Aedes(opts)
}
opts = Object.assign({}, defaultOptions, opts)
this.id = opts.id || uuidv4()
this.counter = 0
this.queueLimit = opts.queueLimit
this.connectTimeout = opts.connectTimeout
this.mq = opts.mq || mqemitter(opts)
this.handle = function handle (conn, req) {
conn.setMaxListeners(opts.concurrency * 2)
// create a new Client instance for a new connection
// return it, just to satisfy the `standard` linter
return new Client(that, conn, req)
}
this.persistence = opts.persistence || memory()
this.persistence.broker = this
this._parallel = parallel()
this._series = series()
this._enqueuers = reusify(DoEnqueues)
this.preConnect = opts.preConnect
this.authenticate = opts.authenticate
this.authorizePublish = opts.authorizePublish
this.authorizeSubscribe = opts.authorizeSubscribe
this.authorizeForward = opts.authorizeForward
this.published = opts.published
this.decodeProtocol = opts.decodeProtocol
this.trustProxy = opts.trustProxy
this.trustedProxies = opts.trustedProxies
this.clients = {}
this.brokers = {}
const heartbeatTopic = '$SYS/' + that.id + '/heartbeat'
this._heartbeatInterval = setInterval(heartbeat, opts.heartbeatInterval)
const bufId = Buffer.from(that.id, 'utf8')
function heartbeat () {
that.publish({
topic: heartbeatTopic,
payload: bufId
}, noop)
}
function deleteOldBrokers (broker) {
if (that.brokers[broker] + (3 * opts.heartbeatInterval) < Date.now()) {
delete that.brokers[broker]
}
}
this._clearWillInterval = setInterval(function () {
Object.keys(that.brokers).forEach(deleteOldBrokers)
pipeline(
that.persistence.streamWill(that.brokers),
bulk.obj(receiveWills),
function done (err) {
if (err) {
that.emit('error', err)
}
}
)
}, opts.heartbeatInterval * 4)
function receiveWills (chunks, done) {
that._parallel(that, checkAndPublish, chunks, done)
}
function checkAndPublish (will, done) {
const needsPublishing =
!that.brokers[will.brokerId] ||
that.brokers[will.brokerId] + (3 * opts.heartbeatInterval) <
Date.now()
if (needsPublishing) {
// TODO: randomize this, so that multiple brokers
// do not publish the same wills at the same time
that.publish(will, function publishWill (err) {
if (err) {
return done(err)
}
that.persistence.delWill({
id: will.clientId,
brokerId: will.brokerId
}, done)
})
} else {
done()
}
}
this.mq.on('$SYS/+/heartbeat', function storeBroker (packet, done) {
that.brokers[packet.payload.toString()] = Date.now()
done()
})
this.mq.on('$SYS/+/new/clients', function closeSameClients (packet, done) {
const serverId = packet.topic.split('/')[1]
const clientId = packet.payload.toString()
if (that.clients[clientId] && serverId !== that.id) {
that.clients[clientId].close(done)
} else {
done()
}
})
// metadata
this.connectedClients = 0
this.closed = false
}
util.inherits(Aedes, EE)
function | (packet, done) {
if (packet.retain) {
this.broker.persistence.storeRetained(packet, done)
} else {
done()
}
}
function emitPacket (packet, done) {
packet.retain = false
this.broker.mq.emit(packet, done)
}
function enqueueOffline (packet, done) {
var enqueuer = this.broker._enqueuers.get()
enqueuer.complete = done
enqueuer.packet = packet
enqueuer.topic = packet.topic
enqueuer.broker = this.broker
this.broker.persistence.subscriptionsByTopic(
packet.topic,
enqueuer.done
)
}
function DoEnqueues () {
this.next = null
this.complete = null
this.packet = null
this.topic = null
this.broker = null
const that = this
this.done = function doneEnqueue (err, subs) {
const broker = that.broker
if (err) {
// is this really recoverable?
// let's just error the whole aedes
broker.emit('error', err)
return
}
if (that.topic.indexOf('$SYS') === 0) {
subs = subs.filter(removeSharp)
}
const packet = that.packet
const complete = that.complete
that.packet = null
that.complete = null
that.topic = null
broker.persistence.outgoingEnqueueCombi(subs, packet, complete)
broker._enqueuers.release(that)
}
}
// + is 43
// # is 35
function removeSharp (sub) {
const code = sub.topic.charCodeAt(0)
return code !== 43 && code !== 35
}
function callPublished (_, done) {
this.broker.published(this.packet, this.client, done)
this.broker.emit('publish', this.packet, this.client)
}
const publishFuncsSimple = [
storeRetained,
emitPacket,
callPublished
]
const publishFuncsQoS = [
storeRetained,
enqueueOffline,
emitPacket,
callPublished
]
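// publish() below runs one of these chains in series: retained-message
// storage, offline enqueueing (QoS > 0 only), emission to live subscribers
// via mqemitter, then the user-supplied `published` hook.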
Aedes.prototype.publish = function (packet, client, done) {
if (typeof client === 'function') {
done = client
client = null
}
var p = new Packet(packet, this)
const publishFuncs = p.qos > 0 ? publishFuncsQoS : publishFuncsSimple
this._series(new PublishState(this, client, packet), publishFuncs, p, done)
}
Aedes.prototype.subscribe = function (topic, func, done) {
this.mq.on(topic, func, done)
}
Aedes.prototype.unsubscribe = function (topic, func, done) {
this.mq.removeListener(topic, func, done)
}
Aedes.prototype.registerClient = function (client) {
const that = this
if (this.clients[client.id]) {
// [MQTT-3.1.4-2]
this.clients[client.id].close(function closeClient () {
that._finishRegisterClient(client)
})
} else {
this._finishRegisterClient(client)
}
}
Aedes.prototype._finishRegisterClient = function (client) {
this.connectedClients++
this.clients[client.id] = client
this.emit('client', client)
this.publish({
topic: '$SYS/' + this.id + '/new/clients',
payload: Buffer.from(client.id, 'utf8')
}, noop)
}
Aedes.prototype.unregisterClient = function (client) {
this.connectedClients--
delete this.clients[client.id]
this.emit('clientDisconnect', client)
this.publish({
topic: '$SYS/' + this.id + '/disconnect/clients',
payload: Buffer.from(client.id, 'utf8')
}, noop)
}
function closeClient (client, cb) {
this.clients[client].close(cb)
}
Aedes.prototype.close = function (cb = noop) {
const that = this
if (this.closed) {
return cb()
}
this.closed = true
clearInterval(this._heartbeatInterval)
clearInterval(this._clearWillInterval)
this._parallel(this, closeClient, Object.keys(this.clients), doneClose)
function doneClose () {
that.emit('closed')
that.mq.close(cb)
}
}
Aedes.prototype.version = require('./package.json').version
function defaultPreConnect (client, callback) {
callback(null, true)
}
function defaultAuthenticate (client, username, password, callback) {
callback(null, true)
}
function defaultAuthorizePublish (client, packet, callback) {
callback(null)
}
function defaultAuthorizeSubscribe (client, sub, callback) {
callback(null, sub)
}
function defaultAuthorizeForward (client, packet) {
return packet
}
function defaultPublished (packet, client, callback) {
callback(null)
}
function PublishState (broker, client, packet) {
this.broker = broker
this.client = client
this.packet = packet
}
function noop () {}
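// Usage sketch (the TCP wiring is assumed, not part of this file):
//
//   const aedes = require('./aedes')()
//   const server = require('net').createServer(aedes.handle)
//   server.listen(1883)
//
// aedes.handle wraps each incoming connection in a Client instance.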
| storeRetained |
wrappers.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.ml.layers.base import BaseLayerConfig, BaseLayerSchema
class WrapperSchema(BaseLayerSchema):
layer = fields.Nested('LayerSchema')
@staticmethod
def schema_config():
return WrapperConfig
class WrapperConfig(BaseLayerConfig):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
Args:
layer: The layer to be wrapped.
"""
IDENTIFIER = 'Wrapper'
SCHEMA = WrapperSchema
def __init__(self, layer, **kwargs):
super(WrapperConfig, self).__init__(**kwargs)
self.layer = layer
class TimeDistributedSchema(WrapperSchema):
@staticmethod
def schema_config():
return TimeDistributedConfig
class TimeDistributedConfig(WrapperConfig):
"""This wrapper allows to apply a layer to every temporal slice of an input.
The input should be at least 3D, and the dimension of index one
will be considered to be the temporal dimension.
Consider a batch of 32 samples,
where each sample is a sequence of 10 vectors of 16 dimensions.
The batch input shape of the layer is then `(32, 10, 16)`,
and the `input_shape`, not including the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer
to each of the 10 timesteps, independently:
```python
# as the first layer in a model
x = TimeDistributed(Dense(8))(x)
# now x.output_shape == (None, 10, 8)
```
The output will then have shape `(32, 10, 8)`.
In subsequent layers, there is no need for the `input_shape`:
```python
x = TimeDistributed(Dense(32))(x)
# now x.output_shape == (None, 10, 32)
```
The output will then have shape `(32, 10, 32)`.
`TimeDistributed` can be used with arbitrary layers, not just `Dense`,
for instance with a `Conv2D` layer:
```python
x = TimeDistributed(Conv2D(64, (3, 3)))(x)
```
Args:
layer: a layer instance.
Polyaxonfile usage:
```yaml
TimeDistributed:
layer:
Dense:
units: 2
```
"""
IDENTIFIER = 'TimeDistributed'
SCHEMA = TimeDistributedSchema
class BidirectionalSchema(WrapperSchema):
@staticmethod
def schema_config():
return BidirectionalConfig
class | (WrapperConfig):
"""Bidirectional wrapper for RNNs.
Args:
layer: `Recurrent` instance.
merge_mode: Mode by which outputs of the
forward and backward RNNs will be combined.
One of {'sum', 'mul', 'concat', 'ave', None}.
If None, the outputs will not be combined,
they will be returned as a list.
Raises:
ValueError: In case of invalid `merge_mode` argument.
Example:
```python
x = Bidirectional(plx.layers.LSTM(units=128, dropout=0.2, recurrent_dropout=0.2))(x)
```
Polyaxonfile usage:
```yaml
Bidirectional:
layer:
LSTM:
units: 128
dropout: 0.2
recurrent_dropout: 0.2
```
"""
IDENTIFIER = 'Bidirectional'
SCHEMA = BidirectionalSchema
| BidirectionalConfig |
bayesian_optimization.py | import logging
import george
import numpy as np
import inspect
from pybnn import BaseModel
from pybnn.dngo import DNGO
from robo.priors.default_priors import DefaultPrior
from robo.models.base_model import BaseModel as BaseModel_
from robo.models.wrapper_bohamiann import WrapperBohamiann
from robo.models.gaussian_process import GaussianProcess
from robo.models.gaussian_process_mcmc import GaussianProcessMCMC
from robo.models.random_forest import RandomForest
from robo.maximizers.base_maximizer import BaseMaximizer
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition_functions.base_acquisition import BaseAcquisitionFunction
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.pi import PI
from robo.acquisition_functions.log_ei import LogEI
from robo.acquisition_functions.lcb import LCB
from robo.acquisition_functions.marginalization import MarginalizationGPMCMC
from robo.initial_design import init_latin_hypercube_sampling
logger = logging.getLogger(__name__)
def bayesian_optimization(objective_function, lower, upper, num_iterations=30, X_init=None, Y_init=None,
maximizer="random", acquisition_func="log_ei", model_type="gp_mcmc",
n_init=3, rng=None, output_path=None):
"""
General interface for Bayesian optimization for global black box
optimization problems.
Parameters
----------
objective_function: function
The objective function that is minimized. This function gets a numpy
array (D,) as input and returns the function value (scalar)
lower: np.ndarray (D,)
The lower bound of the search space
upper: np.ndarray (D,)
The upper bound of the search space
num_iterations: int
The number of iterations (initial design + BO)
X_init: np.ndarray(N,D)
Initial points to warmstart BO
Y_init: np.ndarray(N,1)
Function values of the already initial points
maximizer: {"random", "scipy", "differential_evolution"}
The optimizer for the acquisition function.
acquisition_func: {"ei", "log_ei", "lcb", "pi"}
The acquisition function
model_type: {"gp", "gp_mcmc", "rf", "bohamiann", "dngo"}
The model for the objective function.
n_init: int
Number of points for the initial design. Make sure that it
is <= num_iterations.
output_path: string
Specifies the path where the intermediate output after each iteration will be saved.
If None no output will be saved to disk.
rng: numpy.random.RandomState
Random number generator
Returns
-------
dict with all results
"""
assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
assert np.all(lower < upper), "Lower bound >= upper bound"
assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"
if rng is None:
rng = np.random.RandomState(np.random.randint(0, 10000))
cov_amp = 2
n_dims = lower.shape[0]
initial_ls = np.ones([n_dims])
exp_kernel = george.kernels.Matern52Kernel(initial_ls,
ndim=n_dims)
kernel = cov_amp * exp_kernel
prior = DefaultPrior(len(kernel) + 1)
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
if model_type == "gp":
model = GaussianProcess(kernel, prior=prior, rng=rng,
normalize_output=False, normalize_input=True,
lower=lower, upper=upper)
elif model_type == "gp_mcmc":
model = GaussianProcessMCMC(kernel, prior=prior,
n_hypers=n_hypers,
chain_length=200,
burnin_steps=100,
normalize_input=True,
normalize_output=False,
rng=rng, lower=lower, upper=upper)
elif model_type == "rf": | elif model_type == "bohamiann":
model = WrapperBohamiann()
elif model_type == "dngo":
model = DNGO()
elif isinstance(model_type, (BaseModel, BaseModel_)):
model = model_type
elif callable(model_type):
model = model_type()
else:
raise ValueError("'{}' is not a valid model".format(model_type))
if acquisition_func == "ei":
a = EI(model)
elif acquisition_func == "log_ei":
a = LogEI(model)
elif acquisition_func == "pi":
a = PI(model)
elif acquisition_func == "lcb":
a = LCB(model)
elif isinstance(acquisition_func, BaseAcquisitionFunction):
a = acquisition_func
elif callable(acquisition_func):
a = acquisition_func(model)
else:
raise ValueError("'{}' is not a valid acquisition function"
.format(acquisition_func))
if model_type == "gp_mcmc":
acquisition_func = MarginalizationGPMCMC(a)
else:
acquisition_func = a
if maximizer == "random":
max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
elif maximizer == "scipy":
max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
elif maximizer == "differential_evolution":
max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)
elif isinstance(maximizer, BaseMaximizer):
max_func = maximizer
elif callable(maximizer):
max_func = maximizer(acquisition_func, lower, upper, rng=rng)
else:
raise ValueError("'{}' is not a valid function to maximize the "
"acquisition function".format(maximizer))
bo = BayesianOptimization(objective_function, lower, upper,
acquisition_func, model, max_func,
initial_points=n_init, rng=rng,
initial_design=init_latin_hypercube_sampling,
output_path=output_path)
x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)
results = dict()
results["x_opt"] = x_best
results["f_opt"] = f_min
results["incumbents"] = [inc for inc in bo.incumbents]
results["incumbent_values"] = [val for val in bo.incumbents_values]
results["runtime"] = bo.runtime
results["overhead"] = bo.time_overhead
results["X"] = [x.tolist() for x in bo.X]
results["y"] = [y for y in bo.y]
return results | model = RandomForest(rng=rng)
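# Example (illustrative sketch): minimizing a 1-D quadratic. Assumes RoBO
# and its dependencies are installed; the names below are examples only.
#
#   import numpy as np
#   def objective(x):
#       return float((x[0] - 0.3) ** 2)
#   res = bayesian_optimization(objective,
#                               lower=np.array([0.0]),
#                               upper=np.array([1.0]),
#                               num_iterations=20,
#                               model_type="gp")
#   print(res["x_opt"], res["f_opt"])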
|
thruster_pwm_tsl.py | import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Vector3
from std_msgs.msg import Int16
from rclpy.qos import QoSPresetProfiles
from ament_index_python import get_package_share_directory
import numpy as np
import sys, os
from .parameters import force
from .flags import flags
def create_pwm(value, resolution):
if value < 0.0:
value = -value
if value > 1.0:
value = 1.0
return np.concatenate((np.ones(np.floor(resolution * value).astype(np.int32)), np.zeros(np.ceil(resolution * (1 - value)).astype(np.int32))))
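# Example (sketch): create_pwm(0.25, 8) returns
# array([1., 1., 0., 0., 0., 0., 0., 0.]), i.e. floor(8 * 0.25) ones
# followed by ceil(8 * 0.75) zeros.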
class ThrustController(Node):
def __init__(self):
super().__init__('thrust_controller')
self.declare_parameter('verbose', 0)
self.declare_parameter('frequency', 10)
self.declare_parameter('resolution', 100)
self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value
self.frequency = self.get_parameter('frequency').get_parameter_value().integer_value
self.resolution = self.get_parameter('resolution').get_parameter_value().integer_value
sys.path.insert(1, os.path.join(get_package_share_directory('slider_experiment'), 'python_build/tsl_optimizer'))
import tsl_optimizer as optimizer
self.solver = optimizer.solver()
self.signals = [create_pwm(0, self.resolution) for i in range(8)]
self.i = 0
self.create_subscription(Vector3, 'thrust_cmd', self.callback, QoSPresetProfiles.get_from_short_key('system_default'))
self.pub = self.create_publisher(Int16, 'thruster_flags', QoSPresetProfiles.get_from_short_key('sensor_data'))
self.create_timer(1/(self.frequency * self.resolution), self.send_signals)
def callback(self, msg: Vector3):
T = self.solver.run(p = [msg.x, msg.y, msg.z]).solution
if self.verbose > 0:
self.get_logger().info(f'\n Fx = {msg.x: 2.2f}\n Fy = {msg.y: 2.2f}\ntau = {msg.z: 2.2f}')
self.get_logger().info(f'cmd: {T}')
self.signals = [create_pwm(T[i] / force, self.resolution) for i in range(8)]
def send_signals(self):
|
def main(args=None):
rclpy.init(args=args)
node = ThrustController()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | req = Int16()
tmp = 0
for i in range(8):
if self.signals[i][self.i] == 1:
tmp ^= flags[i]
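# The try/except below guards the assignment: rclpy message setters
# assert field ranges, so a tmp outside the Int16 range raises
# AssertionError and the offending value is printed instead.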
try:
req.data = tmp
except AssertionError:
print(tmp)
self.i += 1
self.i %= self.resolution
self.pub.publish(req) |
reset_unseen_count_request_builder.go | package resetunseencount
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
)
// ResetUnseenCountRequestBuilder builds and executes requests for operations under \groups\{group-id}\microsoft.graph.resetUnseenCount
type ResetUnseenCountRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// ResetUnseenCountRequestBuilderPostOptions options for Post
type ResetUnseenCountRequestBuilderPostOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// NewResetUnseenCountRequestBuilderInternal instantiates a new ResetUnseenCountRequestBuilder and sets the default values.
func NewResetUnseenCountRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*ResetUnseenCountRequestBuilder) |
// NewResetUnseenCountRequestBuilder instantiates a new ResetUnseenCountRequestBuilder and sets the default values.
func NewResetUnseenCountRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*ResetUnseenCountRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewResetUnseenCountRequestBuilderInternal(urlParams, requestAdapter)
}
// CreatePostRequestInformation invoke action resetUnseenCount
func (m *ResetUnseenCountRequestBuilder) CreatePostRequestInformation(options *ResetUnseenCountRequestBuilderPostOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.POST
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Post invoke action resetUnseenCount
func (m *ResetUnseenCountRequestBuilder) Post(options *ResetUnseenCountRequestBuilderPostOptions)(error) {
requestInfo, err := m.CreatePostRequestInformation(options);
if err != nil {
return err
}
err = m.requestAdapter.SendNoContentAsync(*requestInfo, nil, nil)
if err != nil {
return err
}
return nil
}
| {
m := &ResetUnseenCountRequestBuilder{
}
m.urlTemplate = "{+baseurl}/groups/{group_id}/microsoft.graph.resetUnseenCount";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = pathParameters;
m.requestAdapter = requestAdapter;
return m
} |
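// Usage sketch (illustrative only; the request adapter comes from the Kiota
// core libraries and is not defined in this file):
//
//	builder := NewResetUnseenCountRequestBuilder(
//		"https://graph.microsoft.com/v1.0/groups/{group-id}/microsoft.graph.resetUnseenCount",
//		adapter)
//	err := builder.Post(nil) // nil options are accepted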
difference_by_key.rs | use crate::set::Set;
use crate::{exponential_offset_ge_by_key, SetOperation, Collection};
/// Represent the _difference_ set operation that will be applied to two slices of different types.
///
/// # Examples
/// ```
/// # use sdset::Error;
/// # fn try_main() -> Result<(), Error> {
/// use sdset::duo::OpBuilderByKey;
/// use sdset::{SetOperation, Set, SetBuf};
///
/// #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
/// struct Foo { a: i32, b: u8 }
///
/// let a = Set::new(&[
/// Foo{ a: 1, b: 6 },
/// Foo{ a: 1, b: 7 },
/// Foo{ a: 1, b: 8 },
/// Foo{ a: 2, b: 9 },
/// Foo{ a: 2, b: 10 },
/// Foo{ a: 3, b: 10 },
/// ])?;
/// let b = Set::new(&[1, 3, 4, 5]).unwrap();
///
/// // Return the field of Foo that will be used for comparison
/// let f = |x: &Foo| x.a;
///
/// // directly use the i32 for comparison
/// let g = |x: &i32| *x;
///
/// let op = OpBuilderByKey::new(a, b, f, g).difference();
/// let res: SetBuf<Foo> = op.into_set_buf();
///
/// assert_eq!(res.as_slice(), &[Foo{ a: 2, b: 9 }, Foo{ a: 2, b: 10 }][..]);
/// # Ok(()) }
/// # try_main().unwrap();
/// ```
#[derive(Copy, Clone)]
pub struct DifferenceByKey<'a, T: 'a, U: 'a, F, G, K>
where F: Fn(&T) -> K,
G: Fn(&U) -> K,
K: Ord,
{
a: &'a [T],
b: &'a [U],
f: F,
g: G,
}
impl<'a, T, U, F, G, K> DifferenceByKey<'a, T, U, F, G, K>
where F: Fn(&T) -> K,
G: Fn(&U) -> K,
K: Ord,
{
/// Construct one with slices checked to be sorted and deduplicated.
pub fn new(a: &'a Set<T>, b: &'a Set<U>, f: F, g: G) -> Self {
Self {
a: a.as_slice(),
b: b.as_slice(),
f: f,
g: g,
}
}
}
impl<'a, T, U, F, G, K> DifferenceByKey<'a, T, U, F, G, K>
where F: Fn(&T) -> K,
G: Fn(&U) -> K,
K: Ord,
{
fn extend_collection<C, X, E>(mut self, output: &mut C, extend: E)
where C: Collection<X>,
E: Fn(&mut C, &'a [T]),
{
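// Merge walk: repeatedly take the smallest key remaining in `a`, use
// exponential search to skip `b` forward to the first key >= it, drop
// elements of `a` whose key is matched in `b`, and emit the contiguous
// run of `a` that sorts strictly below the next key of `b`.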
while let Some(first) = self.a.first().map(|x| (self.f)(x)) {
self.b = exponential_offset_ge_by_key(self.b, &first, &self.g);
match self.b.first().map(|x| (self.g)(x)) {
Some(min) => {
if min == first {
self.a = exponential_offset_ge_by_key(&self.a[1..], &min, &self.f)
} else {
let off = self.a.iter().take_while(|&x| (self.f)(x) < min).count();
extend(output, &self.a[..off]);
self.a = &self.a[off..]
}
},
None => {
extend(output, self.a);
break;
},
}
}
}
}
impl<'a, T, U, F, G, K> SetOperation<T> for DifferenceByKey<'a, T, U, F, G, K>
where T: Clone,
F: Fn(&T) -> K,
G: Fn(&U) -> K,
K: Ord,
{
fn extend_collection<C>(self, output: &mut C) where C: Collection<T> {
self.extend_collection(output, Collection::extend_from_slice)
}
}
impl<'a, T, U, F, G, K> SetOperation<&'a T> for DifferenceByKey<'a, T, U, F, G, K>
where F: Fn(&T) -> K,
G: Fn(&U) -> K,
K: Ord,
{
fn extend_collection<C>(self, output: &mut C) where C: Collection<&'a T> |
}
#[cfg(test)]
mod tests {
use super::*;
use crate::set::{sort_dedup_vec, SetBuf};
#[derive(Debug, Clone, PartialEq, Eq)]
struct Foo {
a: i32,
b: i8,
}
#[test]
fn difference_empty_no_duplicates() {
let a = Set::new_unchecked(&[
Foo{ a: 1, b: 8 },
Foo{ a: 2, b: 9 },
Foo{ a: 3, b: 10 },
Foo{ a: 4, b: 11 },
Foo{ a: 5, b: 12 },
]);
let b = Set::new(&[1, 2, 3, 4, 5]).unwrap();
let difference: SetBuf<Foo> = DifferenceByKey::new(a, b, |x| x.a, |&x| x).into_set_buf();
assert!(difference.is_empty());
}
#[test]
fn difference_empty_duplicate_relations() {
let a = Set::new_unchecked(&[
Foo{ a: 1, b: 6 },
Foo{ a: 1, b: 7 },
Foo{ a: 1, b: 8 },
Foo{ a: 2, b: 9 },
Foo{ a: 2, b: 10 },
]);
let b = Set::new(&[1, 2, 3, 4, 5]).unwrap();
let difference: SetBuf<Foo> = DifferenceByKey::new(a, b, |x| x.a, |&x| x).into_set_buf();
assert!(difference.is_empty());
}
#[test]
fn difference_non_empty_duplicate_relations() {
let a = Set::new_unchecked(&[
Foo{ a: 1, b: 6 },
Foo{ a: 1, b: 7 },
Foo{ a: 1, b: 8 },
Foo{ a: 2, b: 9 },
Foo{ a: 2, b: 10 },
]);
let b = Set::new(&[1, 3, 4, 5]).unwrap();
let difference: SetBuf<Foo> = DifferenceByKey::new(a, b, |x| x.a, |&x| x).into_set_buf();
assert_eq!(difference.as_slice(), &[
Foo{ a: 2, b: 9 },
Foo{ a: 2, b: 10 },
][..]);
}
quickcheck! {
fn qc_difference(a: Vec<i32>, b: Vec<i64>) -> bool {
use std::collections::BTreeSet;
use std::iter::FromIterator;
let mut a = a;
let mut b = b;
sort_dedup_vec(&mut a);
sort_dedup_vec(&mut b);
let x: SetBuf<i32> = {
let difference = DifferenceByKey { a: &a, b: &b, f: |&x| x, g: |&x| x as i32 };
difference.into_set_buf()
};
let a = BTreeSet::from_iter(a);
let b = BTreeSet::from_iter(b.into_iter().map(|x| x as i32));
let y = a.difference(&b);
let y: Vec<_> = y.cloned().collect();
x.as_slice() == y.as_slice()
}
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use super::*;
use self::test::Bencher;
use crate::set::SetBuf;
#[derive(Debug, Clone)]
pub struct Foo {
a: i32,
b: u8
}
impl Foo {
fn new(a: i32) -> Foo {
Foo { a, b: 0 }
}
}
#[bench]
fn two_slices_big(bench: &mut Bencher) {
let a: Vec<_> = (0..100).map(Foo::new).collect();
let b: Vec<_> = (1..101).collect();
let f = |x: &Foo| x.a;
let g = |x: &i32| *x;
bench.iter(|| {
let op = DifferenceByKey { a: &a, b: &b, f, g };
let res: SetBuf<Foo> = op.into_set_buf();
test::black_box(|| res);
});
}
#[bench]
fn two_slices_big2(bench: &mut Bencher) {
let a: Vec<_> = (0..100).map(Foo::new).collect();
let b: Vec<_> = (51..151).collect();
let f = |x: &Foo| x.a;
let g = |x: &i32| *x;
bench.iter(|| {
let op = DifferenceByKey { a: &a, b: &b, f, g };
let res: SetBuf<Foo> = op.into_set_buf();
test::black_box(|| res);
});
}
#[bench]
fn two_slices_big3(bench: &mut Bencher) {
let a: Vec<_> = (0..100).map(Foo::new).collect();
let b: Vec<_> = (100..200).collect();
let f = |x: &Foo| x.a;
let g = |x: &i32| *x;
bench.iter(|| {
let op = DifferenceByKey { a: &a, b: &b, f, g };
let res: SetBuf<Foo> = op.into_set_buf();
test::black_box(|| res);
});
}
}
| {
self.extend_collection(output, Collection::extend)
} |
naivebayes_crossvalidation.py | '''
Created on Sat Nov 05 2016
Copyright (c) 2016 Leniel Macaferi's Consulting
'''
import os | path = os.path.realpath('..')
# Loading the data used to train
trainingSet = pd.read_csv(os.path.join(path, '../Data/classification-training.csv'), sep=',', header = None)
classes = trainingSet[trainingSet.columns[22]] # Last column
features = trainingSet[trainingSet.columns[1:22]] # Columns at indexes 1 through 21 (the slice end is exclusive)
#pd.set_option('display.max_columns', 23)
#print(features)
classifier = GaussianNB()
scores = cross_val_score(classifier, features, classes, cv = 5)
print(scores)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) | import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
|
stacktester.go | /*
* stacktester.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"math/big"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"time"
)
const verbose bool = false
var trMap = map[string]fdb.Transaction{}
var trMapLock = sync.RWMutex{}
// Make tuples sortable by byte-order
type byBytes []tuple.Tuple
func (b byBytes) Len() int {
return len(b)
}
func (b byBytes) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b byBytes) Less(i, j int) bool {
return bytes.Compare(b[i].Pack(), b[j].Pack()) < 0
}
func int64ToBool(i int64) bool {
switch i {
case 0:
return false
default:
return true
}
}
type stackEntry struct {
item interface{}
idx int
}
type StackMachine struct {
prefix []byte
trName string
stack []stackEntry
lastVersion int64
threads sync.WaitGroup
verbose bool
de *DirectoryExtension
}
func newStackMachine(prefix []byte, verbose bool) *StackMachine |
func (sm *StackMachine) waitAndPop() (ret stackEntry) {
defer func() {
if r := recover(); r != nil {
switch r := r.(type) {
case fdb.Error:
ret.item = []byte(tuple.Tuple{[]byte("ERROR"), []byte(fmt.Sprintf("%d", r.Code))}.Pack())
default:
panic(r)
}
}
}()
ret, sm.stack = sm.stack[len(sm.stack)-1], sm.stack[:len(sm.stack)-1]
switch el := ret.item.(type) {
case []byte:
ret.item = el
case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple:
ret.item = el
case fdb.Key:
ret.item = []byte(el)
case fdb.FutureNil:
el.MustGet()
ret.item = []byte("RESULT_NOT_PRESENT")
case fdb.FutureByteSlice:
v := el.MustGet()
if v != nil {
ret.item = v
} else {
ret.item = []byte("RESULT_NOT_PRESENT")
}
case fdb.FutureKey:
ret.item = []byte(el.MustGet())
case nil:
default:
log.Fatalf("Don't know how to pop stack element %v %T\n", el, el)
}
return
}
func (sm *StackMachine) popSelector() fdb.KeySelector {
sel := fdb.KeySelector{fdb.Key(sm.waitAndPop().item.([]byte)), int64ToBool(sm.waitAndPop().item.(int64)), int(sm.waitAndPop().item.(int64))}
return sel
}
func (sm *StackMachine) popKeyRange() fdb.KeyRange {
kr := fdb.KeyRange{fdb.Key(sm.waitAndPop().item.([]byte)), fdb.Key(sm.waitAndPop().item.([]byte))}
return kr
}
func (sm *StackMachine) popRangeOptions() fdb.RangeOptions {
ro := fdb.RangeOptions{Limit: int(sm.waitAndPop().item.(int64)), Reverse: int64ToBool(sm.waitAndPop().item.(int64)), Mode: fdb.StreamingMode(sm.waitAndPop().item.(int64) + 1)}
return ro
}
func (sm *StackMachine) popPrefixRange() fdb.ExactRange {
er, e := fdb.PrefixRange(sm.waitAndPop().item.([]byte))
if e != nil {
panic(e)
}
return er
}
func (sm *StackMachine) pushRange(idx int, sl []fdb.KeyValue, prefixFilter []byte) {
var t tuple.Tuple = make(tuple.Tuple, 0, len(sl)*2)
for _, kv := range sl {
if prefixFilter == nil || bytes.HasPrefix(kv.Key, prefixFilter) {
t = append(t, kv.Key)
t = append(t, kv.Value)
}
}
sm.store(idx, []byte(t.Pack()))
}
func (sm *StackMachine) store(idx int, item interface{}) {
sm.stack = append(sm.stack, stackEntry{item, idx})
}
func tupleToString(t tuple.Tuple) string {
var buffer bytes.Buffer
buffer.WriteByte('(')
for i, el := range t {
if i > 0 {
buffer.WriteString(", ")
}
switch el := el.(type) {
case int64, uint64:
buffer.WriteString(fmt.Sprintf("%d", el))
case *big.Int:
buffer.WriteString(fmt.Sprintf("%s", el))
case []byte:
buffer.WriteString(fmt.Sprintf("%+q", string(el)))
case string:
buffer.WriteString(fmt.Sprintf("%+q", el))
case bool:
buffer.WriteString(fmt.Sprintf("%t", el))
case tuple.UUID:
buffer.WriteString(hex.EncodeToString(el[:]))
case float32, float64:
buffer.WriteString(fmt.Sprintf("%f", el))
case nil:
buffer.WriteString("nil")
case tuple.Tuple:
buffer.WriteString(tupleToString(el))
default:
log.Fatalf("Don't know how to stringify tuple elemement %v %T\n", el, el)
}
}
buffer.WriteByte(')')
return buffer.String()
}
func (sm *StackMachine) dumpStack() {
for i := len(sm.stack) - 1; i >= 0; i-- {
fmt.Printf(" %d.", sm.stack[i].idx)
el := sm.stack[i].item
switch el := el.(type) {
case int64, uint64:
fmt.Printf(" %d", el)
case *big.Int:
fmt.Printf(" %s", el)
case fdb.FutureNil:
fmt.Printf(" FutureNil")
case fdb.FutureByteSlice:
fmt.Printf(" FutureByteSlice")
case fdb.FutureKey:
fmt.Printf(" FutureKey")
case []byte:
fmt.Printf(" %+q", string(el))
case fdb.Key:
fmt.Printf(" %+q", string(el))
case string:
fmt.Printf(" %+q", el)
case bool:
fmt.Printf(" %t", el)
case tuple.Tuple:
fmt.Printf(" %s", tupleToString(el))
case tuple.UUID:
fmt.Printf(" %s", hex.EncodeToString(el[:]))
case float32, float64:
fmt.Printf(" %f", el)
case nil:
fmt.Printf(" nil")
default:
log.Fatalf("Don't know how to dump stack element %v %T\n", el, el)
}
if i != 0 {
fmt.Printf(",")
}
}
}
func (sm *StackMachine) executeMutation(t fdb.Transactor, f func(fdb.Transaction) (interface{}, error), isDB bool, idx int) {
_, e := t.Transact(f)
if e != nil {
panic(e)
}
if isDB {
sm.store(idx, []byte("RESULT_NOT_PRESENT"))
}
}
func (sm *StackMachine) checkWatches(watches [4]fdb.FutureNil, expected bool) bool {
for _, watch := range watches {
if watch.IsReady() || expected {
e := watch.Get()
if e != nil {
switch e := e.(type) {
case fdb.Error:
tr, tr_error := db.CreateTransaction()
if tr_error != nil {
panic(tr_error)
}
tr.OnError(e).MustGet()
default:
panic(e)
}
}
if !expected {
return false
}
}
}
return true
}
func (sm *StackMachine) testWatches() {
for {
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(fdb.Key("w0"), []byte("0"))
tr.Set(fdb.Key("w2"), []byte("2"))
tr.Set(fdb.Key("w3"), []byte("3"))
return nil, nil
})
if e != nil {
panic(e)
}
var watches [4]fdb.FutureNil
_, e = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
watches[0] = tr.Watch(fdb.Key("w0"))
watches[1] = tr.Watch(fdb.Key("w1"))
watches[2] = tr.Watch(fdb.Key("w2"))
watches[3] = tr.Watch(fdb.Key("w3"))
tr.Set(fdb.Key("w0"), []byte("0"))
tr.Clear(fdb.Key("w1"))
return nil, nil
})
if e != nil {
panic(e)
}
time.Sleep(5 * time.Second)
if !sm.checkWatches(watches, false) {
continue
}
_, e = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(fdb.Key("w0"), []byte("a"))
tr.Set(fdb.Key("w1"), []byte("b"))
tr.Clear(fdb.Key("w2"))
tr.BitXor(fdb.Key("w3"), []byte("\xff\xff"))
return nil, nil
})
if e != nil {
panic(e)
}
if sm.checkWatches(watches, true) {
return
}
}
}
func (sm *StackMachine) testLocality() {
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Options().SetTimeout(60 * 1000)
tr.Options().SetReadSystemKeys()
boundaryKeys, e := db.LocalityGetBoundaryKeys(fdb.KeyRange{fdb.Key(""), fdb.Key("\xff\xff")}, 0, 0)
if e != nil {
panic(e)
}
for i := 0; i < len(boundaryKeys)-1; i++ {
start := boundaryKeys[i]
end := tr.GetKey(fdb.LastLessThan(boundaryKeys[i+1])).MustGet()
startAddresses := tr.LocalityGetAddressesForKey(start).MustGet()
endAddresses := tr.LocalityGetAddressesForKey(end).MustGet()
for _, address1 := range startAddresses {
found := false
for _, address2 := range endAddresses {
if address1 == address2 {
found = true
break
}
}
if !found {
panic("Locality not internally consistent.")
}
}
}
return nil, nil
})
if e != nil {
panic(e)
}
}
func (sm *StackMachine) logStack(entries map[int]stackEntry, prefix []byte) {
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
for index, el := range entries {
var keyt tuple.Tuple
keyt = append(keyt, int64(index))
keyt = append(keyt, int64(el.idx))
pk := append(prefix, keyt.Pack()...)
var valt tuple.Tuple
valt = append(valt, el.item)
pv := valt.Pack()
vl := 40000
if len(pv) < vl {
vl = len(pv)
}
tr.Set(fdb.Key(pk), pv[:vl])
}
return nil, nil
})
if e != nil {
panic(e)
}
return
}
func (sm *StackMachine) currentTransaction() fdb.Transaction {
trMapLock.RLock()
tr := trMap[sm.trName]
trMapLock.RUnlock()
return tr
}
func (sm *StackMachine) newTransactionWithLockHeld() {
tr, e := db.CreateTransaction()
if e != nil {
panic(e)
}
trMap[sm.trName] = tr
}
func (sm *StackMachine) newTransaction() {
trMapLock.Lock()
sm.newTransactionWithLockHeld()
trMapLock.Unlock()
}
func (sm *StackMachine) switchTransaction(name []byte) {
sm.trName = string(name[:])
trMapLock.RLock()
_, present := trMap[sm.trName]
trMapLock.RUnlock()
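// Double-checked locking: re-test under the write lock so that two stack
// machines racing on the same name create only one transaction.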
if !present {
trMapLock.Lock()
_, present = trMap[sm.trName]
if !present {
sm.newTransactionWithLockHeld()
}
trMapLock.Unlock()
}
}
func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
defer func() {
if r := recover(); r != nil {
switch r := r.(type) {
case fdb.Error:
sm.store(idx, []byte(tuple.Tuple{[]byte("ERROR"), []byte(fmt.Sprintf("%d", r.Code))}.Pack()))
default:
panic(r)
}
}
}()
var e error
op := inst[0].(string)
if sm.verbose {
fmt.Printf("%d. Instruction is %s (%v)\n", idx, op, sm.prefix)
fmt.Printf("Stack from [")
sm.dumpStack()
fmt.Printf(" ] (%d)\n", len(sm.stack))
}
var t fdb.Transactor
var rt fdb.ReadTransactor
var isDB bool
switch {
case strings.HasSuffix(op, "_SNAPSHOT"):
rt = sm.currentTransaction().Snapshot()
op = op[:len(op)-9]
case strings.HasSuffix(op, "_DATABASE"):
t = db
rt = db
op = op[:len(op)-9]
isDB = true
default:
t = sm.currentTransaction()
rt = sm.currentTransaction()
}
switch {
case op == "PUSH":
sm.store(idx, inst[1])
case op == "DUP":
entry := sm.stack[len(sm.stack)-1]
sm.store(entry.idx, entry.item)
case op == "EMPTY_STACK":
sm.stack = []stackEntry{}
case op == "SWAP":
idx := sm.waitAndPop().item.(int64)
sm.stack[len(sm.stack)-1], sm.stack[len(sm.stack)-1-int(idx)] = sm.stack[len(sm.stack)-1-int(idx)], sm.stack[len(sm.stack)-1]
case op == "POP":
sm.stack = sm.stack[:len(sm.stack)-1]
case op == "SUB":
var x, y *big.Int
switch x1 := sm.waitAndPop().item.(type) {
case *big.Int:
x = x1
case int64:
x = big.NewInt(x1)
case uint64:
x = new(big.Int)
x.SetUint64(x1)
}
switch y1 := sm.waitAndPop().item.(type) {
case *big.Int:
y = y1
case int64:
y = big.NewInt(y1)
case uint64:
y = new(big.Int)
y.SetUint64(y1)
}
sm.store(idx, x.Sub(x, y))
case op == "CONCAT":
str1 := sm.waitAndPop().item
str2 := sm.waitAndPop().item
switch str1.(type) {
case string:
sm.store(idx, str1.(string)+str2.(string))
case []byte:
sm.store(idx, append(str1.([]byte), str2.([]byte)...))
default:
panic("Invalid CONCAT parameter")
}
case op == "NEW_TRANSACTION":
sm.newTransaction()
case op == "USE_TRANSACTION":
sm.switchTransaction(sm.waitAndPop().item.([]byte))
case op == "ON_ERROR":
sm.store(idx, sm.currentTransaction().OnError(fdb.Error{int(sm.waitAndPop().item.(int64))}))
case op == "GET_READ_VERSION":
_, e = rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
sm.lastVersion = rtr.GetReadVersion().MustGet()
sm.store(idx, []byte("GOT_READ_VERSION"))
return nil, nil
})
if e != nil {
panic(e)
}
case op == "SET":
key := fdb.Key(sm.waitAndPop().item.([]byte))
value := sm.waitAndPop().item.([]byte)
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
tr.Set(key, value)
return nil, nil
}, isDB, idx)
case op == "LOG_STACK":
prefix := sm.waitAndPop().item.([]byte)
entries := make(map[int]stackEntry)
for len(sm.stack) > 0 {
entries[len(sm.stack)-1] = sm.waitAndPop()
if len(entries) == 100 {
sm.logStack(entries, prefix)
entries = make(map[int]stackEntry)
}
}
sm.logStack(entries, prefix)
case op == "GET":
key := fdb.Key(sm.waitAndPop().item.([]byte))
res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return rtr.Get(key), nil
})
if e != nil {
panic(e)
}
sm.store(idx, res.(fdb.FutureByteSlice))
case op == "COMMIT":
sm.store(idx, sm.currentTransaction().Commit())
case op == "RESET":
sm.currentTransaction().Reset()
case op == "CLEAR":
key := fdb.Key(sm.waitAndPop().item.([]byte))
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
tr.Clear(key)
return nil, nil
}, isDB, idx)
case op == "SET_READ_VERSION":
sm.currentTransaction().SetReadVersion(sm.lastVersion)
case op == "WAIT_FUTURE":
entry := sm.waitAndPop()
sm.store(entry.idx, entry.item)
case op == "GET_COMMITTED_VERSION":
sm.lastVersion, e = sm.currentTransaction().GetCommittedVersion()
if e != nil {
panic(e)
}
sm.store(idx, []byte("GOT_COMMITTED_VERSION"))
case op == "GET_VERSIONSTAMP":
sm.store(idx, sm.currentTransaction().GetVersionstamp())
case op == "GET_KEY":
sel := sm.popSelector()
prefix := sm.waitAndPop().item.([]byte)
res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return rtr.GetKey(sel).MustGet(), nil
})
if e != nil {
panic(e)
}
key := res.(fdb.Key)
if bytes.HasPrefix(key, prefix) {
sm.store(idx, key)
} else if bytes.Compare(key, prefix) < 0 {
sm.store(idx, prefix)
} else {
s, e := fdb.Strinc(prefix)
if e != nil {
panic(e)
}
sm.store(idx, s)
}
case strings.HasPrefix(op, "GET_RANGE"):
var r fdb.Range
switch op[9:] {
case "_STARTS_WITH":
r = sm.popPrefixRange()
case "_SELECTOR":
r = fdb.SelectorRange{sm.popSelector(), sm.popSelector()}
case "":
r = sm.popKeyRange()
}
ro := sm.popRangeOptions()
var prefix []byte = nil
if op[9:] == "_SELECTOR" {
prefix = sm.waitAndPop().item.([]byte)
}
res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return rtr.GetRange(r, ro).GetSliceOrPanic(), nil
})
if e != nil {
panic(e)
}
sm.pushRange(idx, res.([]fdb.KeyValue), prefix)
case strings.HasPrefix(op, "CLEAR_RANGE"):
var er fdb.ExactRange
switch op[11:] {
case "_STARTS_WITH":
er = sm.popPrefixRange()
case "":
er = sm.popKeyRange()
}
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
tr.ClearRange(er)
return nil, nil
}, isDB, idx)
case op == "TUPLE_PACK":
var t tuple.Tuple
count := sm.waitAndPop().item.(int64)
for i := 0; i < int(count); i++ {
t = append(t, sm.waitAndPop().item)
}
sm.store(idx, []byte(t.Pack()))
case op == "TUPLE_UNPACK":
t, e := tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
if e != nil {
panic(e)
}
for _, el := range t {
sm.store(idx, []byte(tuple.Tuple{el}.Pack()))
}
case op == "TUPLE_SORT":
count := sm.waitAndPop().item.(int64)
tuples := make([]tuple.Tuple, count)
for i := 0; i < int(count); i++ {
tuples[i], e = tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
if e != nil {
panic(e)
}
}
sort.Sort(byBytes(tuples))
for _, t := range tuples {
sm.store(idx, t.Pack())
}
case op == "ENCODE_FLOAT":
val_bytes := sm.waitAndPop().item.([]byte)
var val float32
binary.Read(bytes.NewBuffer(val_bytes), binary.BigEndian, &val)
sm.store(idx, val)
case op == "ENCODE_DOUBLE":
val_bytes := sm.waitAndPop().item.([]byte)
var val float64
binary.Read(bytes.NewBuffer(val_bytes), binary.BigEndian, &val)
sm.store(idx, val)
case op == "DECODE_FLOAT":
val := sm.waitAndPop().item.(float32)
var ibuf bytes.Buffer
binary.Write(&ibuf, binary.BigEndian, val)
sm.store(idx, ibuf.Bytes())
case op == "DECODE_DOUBLE":
val := sm.waitAndPop().item.(float64)
var ibuf bytes.Buffer
binary.Write(&ibuf, binary.BigEndian, val)
sm.store(idx, ibuf.Bytes())
case op == "TUPLE_RANGE":
var t tuple.Tuple
count := sm.waitAndPop().item.(int64)
for i := 0; i < int(count); i++ {
t = append(t, sm.waitAndPop().item)
}
bk, ek := t.FDBRangeKeys()
sm.store(idx, []byte(bk.FDBKey()))
sm.store(idx, []byte(ek.FDBKey()))
case op == "START_THREAD":
newsm := newStackMachine(sm.waitAndPop().item.([]byte), verbose)
sm.threads.Add(1)
go func() {
newsm.Run()
sm.threads.Done()
}()
case op == "WAIT_EMPTY":
prefix := sm.waitAndPop().item.([]byte)
er, e := fdb.PrefixRange(prefix)
if e != nil {
panic(e)
}
db.Transact(func(tr fdb.Transaction) (interface{}, error) {
v := tr.GetRange(er, fdb.RangeOptions{}).GetSliceOrPanic()
if len(v) != 0 {
panic(fdb.Error{1020})
}
return nil, nil
})
sm.store(idx, []byte("WAITED_FOR_EMPTY"))
case op == "READ_CONFLICT_RANGE":
e = sm.currentTransaction().AddReadConflictRange(fdb.KeyRange{fdb.Key(sm.waitAndPop().item.([]byte)), fdb.Key(sm.waitAndPop().item.([]byte))})
if e != nil {
panic(e)
}
sm.store(idx, []byte("SET_CONFLICT_RANGE"))
case op == "WRITE_CONFLICT_RANGE":
e = sm.currentTransaction().AddWriteConflictRange(fdb.KeyRange{fdb.Key(sm.waitAndPop().item.([]byte)), fdb.Key(sm.waitAndPop().item.([]byte))})
if e != nil {
panic(e)
}
sm.store(idx, []byte("SET_CONFLICT_RANGE"))
case op == "READ_CONFLICT_KEY":
e = sm.currentTransaction().AddReadConflictKey(fdb.Key(sm.waitAndPop().item.([]byte)))
if e != nil {
panic(e)
}
sm.store(idx, []byte("SET_CONFLICT_KEY"))
case op == "WRITE_CONFLICT_KEY":
e = sm.currentTransaction().AddWriteConflictKey(fdb.Key(sm.waitAndPop().item.([]byte)))
if e != nil {
panic(e)
}
sm.store(idx, []byte("SET_CONFLICT_KEY"))
case op == "ATOMIC_OP":
opname := strings.Replace(strings.Title(strings.Replace(strings.ToLower(sm.waitAndPop().item.(string)), "_", " ", -1)), " ", "", -1)
key := fdb.Key(sm.waitAndPop().item.([]byte))
ival := sm.waitAndPop().item
value := ival.([]byte)
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
reflect.ValueOf(tr).MethodByName(opname).Call([]reflect.Value{reflect.ValueOf(key), reflect.ValueOf(value)})
return nil, nil
}, isDB, idx)
case op == "DISABLE_WRITE_CONFLICT":
sm.currentTransaction().Options().SetNextWriteNoWriteConflictRange()
case op == "CANCEL":
sm.currentTransaction().Cancel()
case op == "UNIT_TESTS":
db.Options().SetLocationCacheSize(100001)
db.Options().SetMaxWatches(10001)
if !fdb.IsAPIVersionSelected() {
log.Fatal("API version should be selected")
}
apiVersion := fdb.MustGetAPIVersion()
if apiVersion == 0 {
log.Fatal("API version is 0")
}
e1 := fdb.APIVersion(apiVersion + 1)
if e1 != nil {
fdbE := e1.(fdb.Error)
if fdbE.Code != 2201 {
panic(e1)
}
} else {
log.Fatal("Was not stopped from selecting two API versions")
}
e2 := fdb.APIVersion(apiVersion - 1)
if e2 != nil {
fdbE := e2.(fdb.Error)
if fdbE.Code != 2201 {
panic(e2)
}
} else {
log.Fatal("Was not stopped from selecting two API versions")
}
fdb.MustAPIVersion(apiVersion)
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Options().SetPrioritySystemImmediate()
tr.Options().SetPriorityBatch()
tr.Options().SetCausalReadRisky()
tr.Options().SetCausalWriteRisky()
tr.Options().SetReadYourWritesDisable()
tr.Options().SetReadSystemKeys()
tr.Options().SetAccessSystemKeys()
tr.Options().SetTimeout(60 * 1000)
tr.Options().SetRetryLimit(50)
tr.Options().SetMaxRetryDelay(100)
tr.Options().SetUsedDuringCommitProtectionDisable()
tr.Options().SetTransactionLoggingEnable("my_transaction")
tr.Options().SetReadLockAware()
tr.Options().SetLockAware()
return tr.Get(fdb.Key("\xff")).MustGet(), nil
})
if e != nil {
panic(e)
}
sm.testWatches()
sm.testLocality()
case strings.HasPrefix(op, "DIRECTORY_"):
sm.de.processOp(sm, op[10:], isDB, idx, t, rt)
default:
log.Fatalf("Unhandled operation %s\n", string(inst[0].([]byte)))
}
if sm.verbose {
fmt.Printf(" to [")
sm.dumpStack()
fmt.Printf(" ] (%d)\n\n", len(sm.stack))
}
runtime.Gosched()
}
func (sm *StackMachine) Run() {
r, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
return tr.GetRange(tuple.Tuple{sm.prefix}, fdb.RangeOptions{}).GetSliceOrPanic(), nil
})
if e != nil {
panic(e)
}
instructions := r.([]fdb.KeyValue)
for i, kv := range instructions {
inst, _ := tuple.Unpack(fdb.Key(kv.Value))
if sm.verbose {
fmt.Printf("Instruction %d\n", i)
}
sm.processInst(i, inst)
}
sm.threads.Wait()
}
var db fdb.Database
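// Usage: <binary> <prefix> <api_version> [cluster_file]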
func main() {
var clusterFile string
prefix := []byte(os.Args[1])
if len(os.Args) > 3 {
clusterFile = os.Args[3]
}
var e error
var apiVersion int
apiVersion, e = strconv.Atoi(os.Args[2])
if e != nil {
log.Fatal(e)
}
if fdb.IsAPIVersionSelected() {
log.Fatal("API version already selected")
}
e = fdb.APIVersion(apiVersion)
if e != nil {
log.Fatal(e)
}
if fdb.MustGetAPIVersion() != apiVersion {
log.Fatal("API version not equal to value selected")
}
db, e = fdb.OpenDatabase(clusterFile)
if e != nil {
log.Fatal(e)
}
sm := newStackMachine(prefix, verbose)
sm.Run()
}
| {
sm := StackMachine{verbose: verbose, prefix: prefix, de: newDirectoryExtension(), trName: string(prefix[:])}
return &sm
} |
getCompiledProjects.js | export default function | (state) {
return state.get('compiledProjects');
}
| getCompiledProjects |
EOV_S_65_60.py | #!/usr/bin/env python3
#
# Copyright (c) 2019-2021 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
# See EOV_C_25_20.py for a commented script
import time
import logging
from environs import Env
import lgsvl
FORMAT = "[%(levelname)6s] [%(name)s] %(message)s"
logging.basicConfig(level=logging.WARNING, format=FORMAT)
log = logging.getLogger(__name__)
env = Env()
MAX_EGO_SPEED = 29.167 # (105 km/h, 65 mph)
MAX_POV_SPEED = 26.667 # (96 km/h, 60 mph)
INITIAL_HEADWAY = 130 # spec says >105m
SPEED_VARIANCE = 4
TIME_LIMIT = 30
TIME_DELAY = 5
LGSVL__SIMULATOR_HOST = env.str("LGSVL__SIMULATOR_HOST", "127.0.0.1")
LGSVL__SIMULATOR_PORT = env.int("LGSVL__SIMULATOR_PORT", 8181)
LGSVL__AUTOPILOT_0_HOST = env.str("LGSVL__AUTOPILOT_0_HOST", "127.0.0.1")
LGSVL__AUTOPILOT_0_PORT = env.int("LGSVL__AUTOPILOT_0_PORT", 9090)
print("EOV_S_65_60 - ", end='')
sim = lgsvl.Simulator(LGSVL__SIMULATOR_HOST, LGSVL__SIMULATOR_PORT)
scene_name = env.str("LGSVL__MAP", lgsvl.wise.DefaultAssets.map_straight2laneopposing)
if sim.current_scene == scene_name:
sim.reset()
else:
sim.load(scene_name)
# spawn EGO in the 2nd to right lane
egoState = lgsvl.AgentState()
# A point close to the desired lane was found in Editor.
# This method returns the position and orientation of the closest lane to the point.
egoState.transform = sim.map_point_on_lane(lgsvl.Vector(-1.6, 0, -65))
ego = sim.add_agent(env.str("LGSVL__VEHICLE_0", lgsvl.wise.DefaultAssets.ego_lincoln2017mkz_apollo5_full_analysis), lgsvl.AgentType.EGO, egoState)
forward = lgsvl.utils.transform_to_forward(egoState.transform)
right = lgsvl.utils.transform_to_right(egoState.transform)
ego.connect_bridge(LGSVL__AUTOPILOT_0_HOST, LGSVL__AUTOPILOT_0_PORT)
dv = lgsvl.dreamview.Connection(sim, ego, LGSVL__AUTOPILOT_0_HOST)
dv.set_hd_map(env.str("LGSVL__AUTOPILOT_HD_MAP", 'Straight2LaneOpposing'))
dv.set_vehicle(env.str("LGSVL__AUTOPILOT_0_VEHICLE_CONFIG", 'Lincoln2017MKZ'))
try:
modules = env.list("LGSVL__AUTOPILOT_0_VEHICLE_MODULES", subcast=str)
if len(modules) == 0:
log.warning("LGSVL__AUTOPILOT_0_VEHICLE_MODULES is empty, using default list: {0}".format(modules))
modules = [
'Recorder',
'Localization',
'Perception',
'Transform',
'Routing',
'Prediction',
'Planning',
'Traffic Light',
'Control'
]
except Exception:
modules = [
'Recorder',
'Localization',
'Perception',
'Transform',
'Routing',
'Prediction',
'Planning',
'Traffic Light',
'Control'
]
log.warning("LGSVL__AUTOPILOT_0_VEHICLE_MODULES is not set, using default list: {0}".format(modules))
destination = egoState.position + 135 * forward
dv.setup_apollo(destination.x, destination.z, modules)
finalPOVWaypointPosition = egoState.position - 2.15 * right
POVState = lgsvl.AgentState()
POVState.transform.position = egoState.position + (4.5 + INITIAL_HEADWAY) * forward - 2.15 * right
POVState.transform.rotation = lgsvl.Vector(0, -180, 0)
POV = sim.add_agent("Sedan", lgsvl.AgentType.NPC, POVState)
POVWaypoints = []
POVWaypoints.append(lgsvl.DriveWaypoint(POVState.transform.position, MAX_POV_SPEED, POVState.transform.rotation))
POVWaypoints.append(lgsvl.DriveWaypoint(finalPOVWaypointPosition, 0, POVState.transform.rotation))
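# The POV approaches in the opposing lane at MAX_POV_SPEED and stops level with the EGO's start position.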
def on_collision(agent1, agent2, contact):
|
ego.on_collision(on_collision)
POV.on_collision(on_collision)
try:
t0 = time.time()
sim.run(TIME_DELAY)
POV.follow(POVWaypoints)
while True:
egoCurrentState = ego.state
if egoCurrentState.speed > MAX_EGO_SPEED + SPEED_VARIANCE:
raise lgsvl.evaluator.TestException(
"Ego speed exceeded limit, {} > {} m/s".format(egoCurrentState.speed, MAX_EGO_SPEED + SPEED_VARIANCE)
)
POVCurrentState = POV.state
if POVCurrentState.speed > MAX_POV_SPEED + SPEED_VARIANCE:
raise lgsvl.evaluator.TestException(
"POV1 speed exceeded limit, {} > {} m/s".format(POVCurrentState.speed, MAX_POV_SPEED + SPEED_VARIANCE)
)
sim.run(0.5)
if time.time() - t0 > TIME_LIMIT:
break
except lgsvl.evaluator.TestException as e:
exit("FAILED: {}".format(e))
print("PASSED")
| raise lgsvl.evaluator.TestException("Ego collided with {}".format(agent2)) |
control.rs | use std::{
convert::TryFrom,
fmt::{self, Debug, Formatter},
net::{IpAddr, Ipv4Addr, Ipv6Addr},
};
use bitflags::bitflags;
use bytes::{Buf, BufMut};
use log::warn;
use crate::protocol::{TimeSpan, TimeStamp};
use crate::{MsgNumber, SeqNumber, SocketId};
mod srt;
pub use self::srt::*;
use super::PacketParseError;
use fmt::Display;
/// A UDP packet carrying control information
///
/// ```ignore,
/// 0 1 2 3
/// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// |1| Type | Reserved |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | | Additional Info |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | Time Stamp |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | Destination Socket ID |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | |
/// ~ Control Information Field ~
/// | |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// ```
/// (from <https://tools.ietf.org/html/draft-gg-udt-03#page-5>)
#[derive(Clone, PartialEq, Eq)]
pub struct ControlPacket {
/// The timestamp, relative to the socket start time (wrapping every 2^32 microseconds)
pub timestamp: TimeStamp,
/// The dest socket ID, used for multiplexing
pub dest_sockid: SocketId,
/// The extra data
pub control_type: ControlTypes,
}
/// The different kind of control packets
#[derive(Clone, PartialEq, Eq)]
#[allow(clippy::large_enum_variant)]
pub enum ControlTypes {
/// The control packet for initiating connections, type 0x0
/// Does not use Additional Info
Handshake(HandshakeControlInfo),
/// To keep a connection alive
/// Does not use Additional Info or Control Info, type 0x1
KeepAlive,
/// ACK packet, type 0x2
Ack(AckControlInfo),
/// NAK packet, type 0x3
/// Additional Info isn't used
/// The information is stored in the loss compression format, specified in the loss_compression module.
Nak(Vec<u32>),
/// Shutdown packet, type 0x5
Shutdown,
/// Acknowledgement of Acknowledgement (ACK2) 0x6
/// Additional Info (the i32) is the ACK sequence number to acknowledge
Ack2(i32),
/// Drop request, type 0x7
DropRequest {
/// The message to drop
/// Stored in the "addditional info" field of the packet.
msg_to_drop: MsgNumber,
/// The first sequence number in the message to drop
first: SeqNumber,
/// The last sequence number in the message to drop
last: SeqNumber,
},
/// Srt control packets
/// These use the UDT extension type 0xFF
Srt(SrtControlPacket),
}
bitflags! {
/// Used to describe the extension types in the packet
struct ExtFlags: u16 {
/// The packet has a handshake extension
const HS = 0b1;
/// The packet has a kmreq extension
const KM = 0b10;
/// The packet has a config extension (SID or smoother or filter or group)
const CONFIG = 0b100;
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct HsV5Info {
/// the crypto size in bytes, either 0 (no encryption), 16, 24, or 32 (stored on the wire divided by 8)
/// source: https://github.com/Haivision/srt/blob/master/docs/stransmit.md#medium-srt
pub crypto_size: u8,
/// The extension HSReq/HSResp
pub ext_hs: Option<SrtControlPacket>,
/// The extension KMREQ/KMRESP
pub ext_km: Option<SrtControlPacket>,
/// The SID
pub sid: Option<String>,
}
/// HS-version dependent data
#[derive(Clone, PartialEq, Eq)]
#[allow(clippy::large_enum_variant)]
pub enum HandshakeVsInfo {
V4(SocketType),
V5(HsV5Info),
}
/// The control info for handshake packets
#[derive(Clone, PartialEq, Eq)]
pub struct HandshakeControlInfo {
/// The initial sequence number, usually randomly initialized
pub init_seq_num: SeqNumber,
/// Max packet size, including UDP/IP headers. 1500 by default
pub max_packet_size: u32,
/// Max flow window size, by default 25600
pub max_flow_size: u32,
/// Designates where in the handshake process this packet lies
pub shake_type: ShakeType,
/// The socket ID that this request is originating from
pub socket_id: SocketId,
/// SYN cookie
///
/// "generates a cookie value according to the client address and a
/// secret key and sends it back to the client. The client must then send
/// back the same cookie to the server."
pub syn_cookie: i32,
/// The IP address of the connecting client
pub peer_addr: IpAddr,
/// The rest of the data, which is HS version specific
pub info: HandshakeVsInfo,
}
#[derive(Clone, PartialEq, Eq)]
pub struct AckControlInfo {
/// The ack sequence number of this ack, increments for each ack sent.
/// Stored in additional info
pub ack_seq_num: i32,
/// The packet sequence number up to which all packets have been received (exclusive)
pub ack_number: SeqNumber,
/// Round trip time
pub rtt: Option<TimeSpan>,
/// RTT variance
pub rtt_variance: Option<TimeSpan>,
/// available buffer
pub buffer_available: Option<i32>,
/// receive rate, in packets/sec
pub packet_recv_rate: Option<u32>,
/// Estimated Link capacity
pub est_link_cap: Option<i32>,
}
/// The socket type for a handshake.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SocketType {
/// A stream socket, 1 when serialized
Stream = 1,
/// A datagram socket, 2 when serialied
Datagram = 2,
}
/// See <https://tools.ietf.org/html/draft-gg-udt-03#page-10>
///
/// More applicably,
///
/// Note: the client-server connection uses:
/// --> INDUCTION (empty)
/// <-- INDUCTION (cookie)
/// --> CONCLUSION (cookie)
/// <-- CONCLUSION (ok)
///
/// The rendezvous HSv4 (legacy):
/// --> WAVEAHAND (effective only if peer is also connecting)
/// <-- CONCLUSION (empty) (consider yourself connected upon reception)
/// --> AGREEMENT (sent as a response for conclusion, requires no response)
///
/// The rendezvous HSv5 (using SRT extensions):
/// --> WAVEAHAND (with cookie)
/// --- (selecting INITIATOR/RESPONDER by cookie contest - comparing one another's cookie)
/// <-- CONCLUSION (without extensions, if RESPONDER, with extensions, if INITIATOR)
/// --> CONCLUSION (with response extensions, if RESPONDER)
/// <-- AGREEMENT (sent exclusively by INITIATOR upon reception of CONCLUSION with response extensions)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ShakeType {
/// First handshake exchange in client-server connection
Induction,
/// A rendezvous connection, initial connect request, 0
Waveahand,
/// A rendezvous connection, response to initial connect request, -1
/// Also a regular connection client response to the second handshake
Conclusion,
/// Final rendezvous check, -2
Agreement,
/// Reject
Rejection(RejectReason),
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[non_exhaustive]
pub enum CoreRejectReason {
System = 1001,
Peer = 1002,
Resource = 1003,
Rogue = 1004,
Backlog = 1005,
Ipe = 1006,
Close = 1007,
Version = 1008,
RdvCookie = 1009,
BadSecret = 1010,
Unsecure = 1011,
MessageApi = 1012,
Congestion = 1013,
Filter = 1014,
Group = 1015,
Timeout = 1016,
}
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ServerRejectReason {
Fallback = 2000,
KeyNotSup = 2001,
Filepath = 2002,
HostNotFound = 2003,
BadRequest = 2400,
Unauthorized = 2401,
Overload = 2402,
Forbidden = 2403,
Notfound = 2404,
BadMode = 2405,
Unacceptable = 2406,
Conflict = 2409,
NotSupMedia = 2415,
Locked = 2423,
FailedDepend = 2424,
InternalServerError = 2500,
Unimplemented = 2501,
Gateway = 2502,
Down = 2503,
Version = 2505,
NoRoom = 2507,
}
/// Reject code
/// *must* be >= 1000
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RejectReason {
/// Core reject codes, [1000, 2000)
Core(CoreRejectReason),
CoreUnrecognized(i32),
/// Server reject codes, [2000, 3000)
Server(ServerRejectReason),
ServerUnrecognized(i32),
/// User reject code, >= 3000
User(i32),
}
impl HandshakeVsInfo {
/// Get the type (V4) or ext flags (V5)
/// the shake_type is required to decide to encode the magic code
fn type_flags(&self, shake_type: ShakeType) -> u32 {
match self {
HandshakeVsInfo::V4(ty) => *ty as u32,
HandshakeVsInfo::V5(hs) => {
if shake_type == ShakeType::Induction
&& (hs.ext_hs.is_some() || hs.ext_km.is_some() || hs.sid.is_some())
{
// induction does not include any extensions, and instead has the
// magic code. this is an incompatible state.
panic!("Handshake is both induction and has SRT extensions, not valid");
}
let mut flags = ExtFlags::empty();
if hs.ext_hs.is_some() {
flags |= ExtFlags::HS;
}
if hs.ext_km.is_some() {
flags |= ExtFlags::KM;
}
if hs.sid.is_some() {
flags |= ExtFlags::CONFIG;
}
// take the crypto size, get rid of the first three (guaranteed zero) bits, then shift it into the
// most significant 2-byte word
(u32::from(hs.crypto_size) >> 3 << 16)
// when this is an induction packet, include the magic code instead of flags
| if shake_type == ShakeType::Induction {
u32::from(SRT_MAGIC_CODE)
} else {
u32::from(flags.bits())
}
}
}
}
/// Get the UDT version
pub fn version(&self) -> u32 {
match self {
HandshakeVsInfo::V4(_) => 4,
HandshakeVsInfo::V5 { .. } => 5,
}
}
}
impl SocketType {
/// Turns a u16 into a SocketType. If the value wasn't valid (only 1 and 2 are valid), then it returns Err(num)
pub fn from_u16(num: u16) -> Result<SocketType, u16> {
match num {
1 => Ok(SocketType::Stream),
2 => Ok(SocketType::Datagram),
i => Err(i),
}
}
}
impl ControlPacket {
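/// Parse a control packet from a buffer positioned at the start of the UDP payload.
///
/// A minimal usage sketch (any `bytes::Buf` works, e.g. a `Cursor` as in the tests below):
///
/// ```ignore,
/// let mut buf = std::io::Cursor::new(packet_data);
/// let packet = ControlPacket::parse(&mut buf, false)?; // false: the peer address is IPv4
/// ```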
pub fn parse(buf: &mut impl Buf, is_ipv6: bool) -> Result<ControlPacket, PacketParseError> {
let control_type = buf.get_u16() << 1 >> 1; // clear first bit
// get reserved data, which is the last two bytes of the first four bytes
let reserved = buf.get_u16();
let add_info = buf.get_i32();
let timestamp = TimeStamp::from_micros(buf.get_u32());
let dest_sockid = buf.get_u32();
Ok(ControlPacket {
timestamp,
dest_sockid: SocketId(dest_sockid),
// just match against the second byte, as everything is in that
control_type: ControlTypes::deserialize(
control_type,
reserved,
add_info,
buf,
is_ipv6,
)?,
})
}
pub fn serialize<T: BufMut>(&self, into: &mut T) {
// first half of first row, the control type and the 1st bit which is a one
into.put_u16(self.control_type.id_byte() | (0b1 << 15));
// finish that row, which is reserved
into.put_u16(self.control_type.reserved());
// the additional info line
into.put_i32(self.control_type.additional_info());
// timestamp
into.put_u32(self.timestamp.as_micros());
// dest sock id
into.put_u32(self.dest_sockid.0);
// the rest of the info
self.control_type.serialize(into);
}
pub fn handshake(&self) -> Option<&HandshakeControlInfo> {
if let ControlTypes::Handshake(hs) = &self.control_type {
Some(hs)
} else {
None
}
}
}
impl Debug for ControlPacket {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{{{:?} ts={:.4}s dst={:?}}}",
self.control_type,
self.timestamp.as_secs_f64(),
self.dest_sockid,
)
}
}
// I definitely don't totally understand this yet.
// Points of interest: handshake.h:wrapFlags
// core.cpp:8176 (processConnectionRequest -> if INDUCTION)
const SRT_MAGIC_CODE: u16 = 0x4A17;
impl ControlTypes {
/// Deserialize a control info
/// * `packet_type` - the packet type, taken from the first 16 bits of the first row (with the top bit cleared)
/// * `reserved` - the second 16 bits of the first row, reserved for custom packets
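/// * `extra_info` - the "Additional Info" word (the second 32-bit word), consumed by ACK, ACK2 and DropRequest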
fn deserialize<T: Buf>(
packet_type: u16,
reserved: u16,
extra_info: i32,
mut buf: T,
is_ipv6: bool,
) -> Result<ControlTypes, PacketParseError> {
match packet_type {
0x0 => {
// Handshake
// make sure the packet is large enough -- 8 32-bit words plus a 128-bit IP address
if buf.remaining() < 8 * 4 + 16 {
return Err(PacketParseError::NotEnoughData);
}
let udt_version = buf.get_i32();
if udt_version != 4 && udt_version != 5 {
return Err(PacketParseError::BadUdtVersion(udt_version));
}
// the second 32 bit word is always socket type under UDT4
// under SRT HSv5, it is a bit more complex:
//
// byte 1-2: the crypto key size, rightshifted by three. For example 0b11 would translate to a crypto size of 24
// source: https://github.com/Haivision/srt/blob/4f7f2beb2e1e306111b9b11402049a90cb6d3787/srtcore/handshake.h#L123-L125
let crypto_size = buf.get_u16() << 3;
// byte 3-4: the SRT_MAGIC_CODE, to make sure a client is HSv5 or the ExtFlags if this is an induction response
// else, this is the extension flags
//
// it's ok to only have the lower 16 bits here for the socket type because socket types always have a zero upper 16 bits
let type_ext_socket_type = buf.get_u16();
let init_seq_num = SeqNumber::new_truncate(buf.get_u32()); // TODO: should this truncate?
let max_packet_size = buf.get_u32();
let max_flow_size = buf.get_u32();
let shake_type = match ShakeType::try_from(buf.get_i32()) {
Ok(ct) => ct,
Err(err_ct) => return Err(PacketParseError::BadConnectionType(err_ct)),
};
let socket_id = SocketId(buf.get_u32());
let syn_cookie = buf.get_i32();
let peer_addr = if !is_ipv6 {
let ip = buf.get_u32_le();
buf.get_u32();
buf.get_u32();
buf.get_u32();
IpAddr::from(Ipv4Addr::from(ip))
} else {
let mut ip_buf = [0u8; 16];
buf.copy_to_slice(&mut ip_buf);
IpAddr::from(Ipv6Addr::from(ip_buf))
};
let info = match udt_version {
4 => HandshakeVsInfo::V4(match SocketType::from_u16(type_ext_socket_type) {
Ok(t) => t,
Err(e) => return Err(PacketParseError::BadSocketType(e)),
}),
5 => {
// make sure crypto size is of a valid variant
let crypto_size = match crypto_size {
0 | 16 | 24 | 32 => crypto_size as u8,
c => {
warn!(
"Unrecognized crypto key length: {}, disabling encryption. Should be 0, 16, 24, or 32 bytes.",
c
);
0
}
};
if shake_type == ShakeType::Induction {
if type_ext_socket_type != SRT_MAGIC_CODE {
// TODO: should this bail? What does the reference implementation do?
warn!("HSv5 induction response did not have SRT_MAGIC_CODE, which is suspicious")
}
HandshakeVsInfo::V5(HsV5Info::default())
} else {
// if this is not induction, this is the extension flags
let extensions = match ExtFlags::from_bits(type_ext_socket_type) {
Some(i) => i,
None => {
warn!(
"Unnecessary bits in extensions flags: {:b}",
type_ext_socket_type
);
ExtFlags::from_bits_truncate(type_ext_socket_type)
}
};
// parse out extensions
let mut sid = None;
let mut ext_hs = None;
let mut ext_km = None;
while buf.remaining() > 4 {
let pack_type = buf.get_u16();
let pack_size_words = buf.get_u16();
let pack_size = usize::from(pack_size_words) * 4;
if buf.remaining() < pack_size {
return Err(PacketParseError::NotEnoughData);
}
let mut buffer = buf.take(pack_size);
match pack_type {
1 | 2 => {
if !extensions.contains(ExtFlags::HS) |
if ext_hs != None {
warn!("Handshake contains multiple handshake extensions, only the last will be applied!");
}
ext_hs =
Some(SrtControlPacket::parse(pack_type, &mut buffer)?);
}
3 | 4 => {
if !extensions.contains(ExtFlags::KM) {
warn!("Handshake contains key material extension type {} without KMREQ flag!", pack_type);
}
if ext_km != None {
warn!("Handshake contains multiple key material extensions, only the last will be applied!");
}
ext_km =
Some(SrtControlPacket::parse(pack_type, &mut buffer)?);
}
_ => {
if !extensions.contains(ExtFlags::CONFIG) {
warn!("Handshake contains config extension type {} without CONFIG flag!", pack_type);
}
match SrtControlPacket::parse(pack_type, &mut buffer)? {
//5 = sid:
SrtControlPacket::StreamId(stream_id) => {
sid = Some(stream_id)
}
_ => unimplemented!("Implement other kinds"),
}
}
}
buf = buffer.into_inner();
}
if buf.remaining() != 0 {
warn!("Handshake has data left, but not enough for an extension!");
}
if ext_hs.is_none() && extensions.contains(ExtFlags::HS) {
warn!("Handshake has HSREQ flag, but contains no handshake extensions!");
}
if ext_km.is_none() && extensions.contains(ExtFlags::KM) {
warn!("Handshake has KMREQ flag, but contains no key material extensions!");
}
HandshakeVsInfo::V5(HsV5Info {
crypto_size,
ext_hs,
ext_km,
sid,
})
}
}
_ => unreachable!(), // this is already checked for above
};
Ok(ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num,
max_packet_size,
max_flow_size,
shake_type,
socket_id,
syn_cookie,
peer_addr,
info,
}))
}
0x1 => {
// discard the "unused" packet field, if it exists
if buf.remaining() >= 4 {
buf.get_u32();
}
Ok(ControlTypes::KeepAlive)
}
0x2 => {
// ACK
// make sure there are enough bytes -- only one required field
if buf.remaining() < 4 {
return Err(PacketParseError::NotEnoughData);
}
// read control info
let ack_number = SeqNumber::new_truncate(buf.get_u32());
// if there is more data, use it. However, it's optional
let opt_read_next_u32 = |buf: &mut T| {
if buf.remaining() >= 4 {
Some(buf.get_u32())
} else {
None
}
};
let opt_read_next_i32 = |buf: &mut T| {
if buf.remaining() >= 4 {
Some(buf.get_i32())
} else {
None
}
};
let rtt = opt_read_next_i32(&mut buf).map(TimeSpan::from_micros);
let rtt_variance = opt_read_next_i32(&mut buf).map(TimeSpan::from_micros);
let buffer_available = opt_read_next_i32(&mut buf);
let packet_recv_rate = opt_read_next_u32(&mut buf);
let est_link_cap = opt_read_next_i32(&mut buf);
Ok(ControlTypes::Ack(AckControlInfo {
ack_seq_num: extra_info,
ack_number,
rtt,
rtt_variance,
buffer_available,
packet_recv_rate,
est_link_cap,
}))
}
0x3 => {
// NAK
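// each 32-bit word is an entry in the compressed loss list (see the loss_compression module)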
let mut loss_info = Vec::new();
while buf.remaining() >= 4 {
loss_info.push(buf.get_u32());
}
Ok(ControlTypes::Nak(loss_info))
}
0x5 => {
if buf.remaining() >= 4 {
buf.get_u32(); // discard "unused" packet field
}
Ok(ControlTypes::Shutdown)
}
0x6 => {
// ACK2
if buf.remaining() >= 4 {
buf.get_u32(); // discard "unused" packet field
}
Ok(ControlTypes::Ack2(extra_info))
}
0x7 => {
// Drop request
if buf.remaining() < 2 * 4 {
return Err(PacketParseError::NotEnoughData);
}
Ok(ControlTypes::DropRequest {
msg_to_drop: MsgNumber::new_truncate(extra_info as u32), // cast is safe, just reinterpret
first: SeqNumber::new_truncate(buf.get_u32()),
last: SeqNumber::new_truncate(buf.get_u32()),
})
}
0x7FFF => {
// Srt
Ok(ControlTypes::Srt(SrtControlPacket::parse(
reserved, &mut buf,
)?))
}
x => Err(PacketParseError::BadControlType(x)),
}
}
fn id_byte(&self) -> u16 {
match *self {
ControlTypes::Handshake(_) => 0x0,
ControlTypes::KeepAlive => 0x1,
ControlTypes::Ack { .. } => 0x2,
ControlTypes::Nak(_) => 0x3,
ControlTypes::Shutdown => 0x5,
ControlTypes::Ack2(_) => 0x6,
ControlTypes::DropRequest { .. } => 0x7,
ControlTypes::Srt(_) => 0x7FFF,
}
}
fn additional_info(&self) -> i32 {
match self {
// These types have additional info
ControlTypes::DropRequest { msg_to_drop: a, .. } => a.as_raw() as i32,
ControlTypes::Ack2(a) | ControlTypes::Ack(AckControlInfo { ack_seq_num: a, .. }) => *a,
// These do not, just use zero
_ => 0,
}
}
fn reserved(&self) -> u16 {
match self {
ControlTypes::Srt(srt) => srt.type_id(),
_ => 0,
}
}
fn serialize<T: BufMut>(&self, into: &mut T) {
match self {
ControlTypes::Handshake(ref c) => {
into.put_u32(c.info.version());
into.put_u32(c.info.type_flags(c.shake_type));
into.put_u32(c.init_seq_num.as_raw());
into.put_u32(c.max_packet_size);
into.put_u32(c.max_flow_size);
into.put_i32(c.shake_type.into());
into.put_u32(c.socket_id.0);
into.put_i32(c.syn_cookie);
match c.peer_addr {
IpAddr::V4(four) => {
let v = u32::from(four);
into.put_u32_le(v);
// the data structure requires enough space for an ipv6, so pad the end with 16 - 4 = 12 bytes
into.put(&[0; 12][..]);
}
IpAddr::V6(six) => {
let v = u128::from(six);
into.put_u128(v);
}
}
// serialize extensions
if let HandshakeVsInfo::V5(hs) = &c.info {
for ext in [
&hs.ext_hs,
&hs.ext_km,
&hs.sid.clone().map(SrtControlPacket::StreamId),
]
.iter()
.filter_map(|&s| s.as_ref())
{
into.put_u16(ext.type_id());
// put the size in 32-bit integers
into.put_u16(ext.size_words());
ext.serialize(into);
}
}
}
ControlTypes::Ack(AckControlInfo {
ack_number,
rtt,
rtt_variance,
buffer_available,
packet_recv_rate,
est_link_cap,
..
}) => {
into.put_u32(ack_number.as_raw());
into.put_i32(rtt.map(|t| t.as_micros()).unwrap_or(10_000));
into.put_i32(rtt_variance.map(|t| t.as_micros()).unwrap_or(50_000));
into.put_i32(buffer_available.unwrap_or(8175)); // TODO: better defaults
into.put_u32(packet_recv_rate.unwrap_or(10_000));
into.put_i32(est_link_cap.unwrap_or(1_000));
}
ControlTypes::Nak(ref n) => {
for &loss in n {
into.put_u32(loss);
}
}
ControlTypes::DropRequest { .. } => unimplemented!(),
ControlTypes::Ack2(_) | ControlTypes::Shutdown | ControlTypes::KeepAlive => {
// The reference implementation appends one (4 byte) word at the end of these packets, which wireshark labels as 'Unused'
// I have no idea why, but wireshark reports it as a "malformed packet" without it. For the record,
// this is NOT in the UDT specification. I wonder if this was carried over from the original UDT implementation.
// https://github.com/Haivision/srt/blob/86013826b5e0c4d8e531cf18a30c6ad4b16c1b3b/srtcore/packet.cpp#L309
into.put_u32(0x0);
}
ControlTypes::Srt(srt) => {
srt.serialize(into);
}
};
}
}
impl Debug for ControlTypes {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
match self {
ControlTypes::Handshake(hs) => write!(f, "{:?}", hs),
ControlTypes::KeepAlive => write!(f, "KeepAlive"),
ControlTypes::Ack(AckControlInfo {
ack_seq_num,
ack_number,
rtt,
rtt_variance,
buffer_available,
packet_recv_rate,
est_link_cap,
}) => {
write!(f, "Ack(asn={} an={}", ack_seq_num, ack_number,)?;
if let Some(rtt) = rtt {
write!(f, " rtt={}", rtt.as_micros())?;
}
if let Some(rttvar) = rtt_variance {
write!(f, " rttvar={}", rttvar.as_micros())?;
}
if let Some(buf) = buffer_available {
write!(f, " buf_av={}", buf)?;
}
if let Some(prr) = packet_recv_rate {
write!(f, " pack_rr={}", prr)?;
}
if let Some(link_cap) = est_link_cap {
write!(f, " link_cap={}", link_cap)?;
}
write!(f, ")")?;
Ok(())
}
ControlTypes::Nak(nak) => {
write!(f, "Nak({:?})", nak) // TODO could be better, show ranges
}
ControlTypes::Shutdown => write!(f, "Shutdown"),
ControlTypes::Ack2(ackno) => write!(f, "Ack2({})", ackno),
ControlTypes::DropRequest {
msg_to_drop,
first,
last,
} => write!(f, "DropReq(msg={} {}-{})", msg_to_drop, first, last),
ControlTypes::Srt(srt) => write!(f, "{:?}", srt),
}
}
}
impl Debug for HandshakeControlInfo {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(
f,
"HS {:?} from={:?} {:?}",
self.shake_type, self.socket_id, self.info
)
}
}
impl Default for HsV5Info {
fn default() -> Self {
HsV5Info {
crypto_size: 0,
ext_hs: None,
ext_km: None,
sid: None,
}
}
}
impl Debug for HandshakeVsInfo {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
HandshakeVsInfo::V4(stype) => write!(f, "UDT: {:?}", stype),
HandshakeVsInfo::V5(hs) => {
write!(f, "SRT: crypto={:?}", hs.crypto_size)?;
if let Some(pack) = &hs.ext_hs {
write!(f, " hs={:?}", pack)?;
}
if let Some(pack) = &hs.ext_km {
write!(f, " km={:?}", pack)?;
}
if let Some(sid) = &hs.sid {
write!(f, " sid={:?}", sid)?;
}
Ok(())
}
}
}
}
impl TryFrom<i32> for ShakeType {
/// Turns an i32 into a `ShakeType`, returning Err(num) if no valid one was passed.
type Error = i32;
fn try_from(value: i32) -> Result<Self, Self::Error> {
match value {
1 => Ok(ShakeType::Induction),
0 => Ok(ShakeType::Waveahand),
-1 => Ok(ShakeType::Conclusion),
-2 => Ok(ShakeType::Agreement),
i if i < 1000 => Err(i), // not a basic type and not a rejection code
i => Ok(ShakeType::Rejection(RejectReason::try_from(i).unwrap())), // unwrap is safe--will always be >= 1000
}
}
}
impl From<ShakeType> for i32 {
fn from(st: ShakeType) -> i32 {
match st {
ShakeType::Induction => 1,
ShakeType::Waveahand => 0,
ShakeType::Conclusion => -1,
ShakeType::Agreement => -2,
ShakeType::Rejection(rej) => rej.into(),
}
}
}
/// Returns error if value < 1000
impl TryFrom<i32> for RejectReason {
type Error = i32;
fn try_from(value: i32) -> Result<Self, Self::Error> {
match value {
v if v < 1000 => Err(v),
v if v < 2000 => Ok(match CoreRejectReason::try_from(v) {
Ok(rr) => RejectReason::Core(rr),
Err(rr) => RejectReason::CoreUnrecognized(rr),
}),
v if v < 3000 => Ok(match ServerRejectReason::try_from(v) {
Ok(rr) => RejectReason::Server(rr),
Err(rr) => RejectReason::ServerUnrecognized(rr),
}),
v => Ok(RejectReason::User(v)),
}
}
}
impl From<RejectReason> for i32 {
fn from(rr: RejectReason) -> i32 {
match rr {
RejectReason::Core(c) => c.into(),
RejectReason::CoreUnrecognized(c) => c,
RejectReason::Server(s) => s.into(),
RejectReason::ServerUnrecognized(s) => s,
RejectReason::User(u) => u,
}
}
}
impl Display for RejectReason {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
RejectReason::Core(c) => write!(f, "{}", c),
RejectReason::CoreUnrecognized(c) => write!(f, "Unrecognized core error: {}", c),
RejectReason::Server(s) => write!(f, "{}", s),
RejectReason::ServerUnrecognized(s) => write!(f, "Unrecognized server error: {}", s),
RejectReason::User(u) => write!(f, "User error: {}", u),
}
}
}
impl From<CoreRejectReason> for RejectReason {
fn from(rr: CoreRejectReason) -> RejectReason {
RejectReason::Core(rr)
}
}
impl TryFrom<i32> for CoreRejectReason {
type Error = i32;
fn try_from(value: i32) -> Result<Self, Self::Error> {
use CoreRejectReason::*;
Ok(match value {
1001 => System,
1002 => Peer,
1003 => Resource,
1004 => Rogue,
1005 => Backlog,
1006 => Ipe,
1007 => Close,
1008 => Version,
1009 => RdvCookie,
1010 => BadSecret,
1011 => Unsecure,
1012 => MessageApi,
1013 => Congestion,
1014 => Filter,
1015 => Group,
1016 => Timeout,
other => return Err(other),
})
}
}
impl From<CoreRejectReason> for i32 {
fn from(rr: CoreRejectReason) -> i32 {
rr as i32
}
}
impl Display for CoreRejectReason {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
CoreRejectReason::System => write!(f, "broken due to system function error"),
CoreRejectReason::Peer => write!(f, "connection was rejected by peer"),
CoreRejectReason::Resource => write!(f, "internal problem with resource allocation"),
CoreRejectReason::Rogue => write!(f, "incorrect data in handshake messages"),
CoreRejectReason::Backlog => write!(f, "listener's backlog exceeded"),
CoreRejectReason::Ipe => write!(f, "internal program error"),
CoreRejectReason::Close => write!(f, "socket is closing"),
CoreRejectReason::Version => {
write!(f, "peer is older version than agent's minimum set")
}
CoreRejectReason::RdvCookie => write!(f, "rendezvous cookie collision"),
CoreRejectReason::BadSecret => write!(f, "wrong password"),
CoreRejectReason::Unsecure => write!(f, "password required or unexpected"),
CoreRejectReason::MessageApi => write!(f, "streamapi/messageapi collision"),
CoreRejectReason::Congestion => write!(f, "incompatible congestion-controller type"),
CoreRejectReason::Filter => write!(f, "incompatible packet filter"),
CoreRejectReason::Group => write!(f, "incompatible group"),
CoreRejectReason::Timeout => write!(f, "connection timeout"),
}
}
}
impl From<ServerRejectReason> for RejectReason {
fn from(rr: ServerRejectReason) -> RejectReason {
RejectReason::Server(rr)
}
}
impl TryFrom<i32> for ServerRejectReason {
type Error = i32;
fn try_from(value: i32) -> Result<Self, Self::Error> {
Ok(match value {
2000 => ServerRejectReason::Fallback,
2001 => ServerRejectReason::KeyNotSup,
2002 => ServerRejectReason::Filepath,
2003 => ServerRejectReason::HostNotFound,
2400 => ServerRejectReason::BadRequest,
2401 => ServerRejectReason::Unauthorized,
2402 => ServerRejectReason::Overload,
2403 => ServerRejectReason::Forbidden,
2404 => ServerRejectReason::Notfound,
2405 => ServerRejectReason::BadMode,
2406 => ServerRejectReason::Unacceptable,
2409 => ServerRejectReason::Conflict,
2415 => ServerRejectReason::NotSupMedia,
2423 => ServerRejectReason::Locked,
2424 => ServerRejectReason::FailedDepend,
2500 => ServerRejectReason::InternalServerError,
2501 => ServerRejectReason::Unimplemented,
2502 => ServerRejectReason::Gateway,
2503 => ServerRejectReason::Down,
2505 => ServerRejectReason::Version,
2507 => ServerRejectReason::NoRoom,
unrecog => return Err(unrecog),
})
}
}
impl From<ServerRejectReason> for i32 {
fn from(rr: ServerRejectReason) -> i32 {
rr as i32
}
}
impl Display for ServerRejectReason {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ServerRejectReason::Fallback =>
write!(f, "the application wants to report some problem, but can't precisely specify it"),
ServerRejectReason::KeyNotSup =>
write!(f, "The key used in the StreamID keyed string is not supported by the service"),
ServerRejectReason::Filepath =>write!(f, "The resource type designates a file and the path is either wrong syntax or not found"),
ServerRejectReason::HostNotFound => write!(f, "The `h` host specification was not recognized by the service"),
ServerRejectReason::BadRequest => write!(f, "General syntax error in the SocketID specification (also a fallback code for undefined cases)"),
ServerRejectReason::Unauthorized => write!(f, "Authentication failed, provided that the user was correctly identified and access to the required resource would be granted"),
ServerRejectReason::Overload => write!(f, "The server is too heavily loaded, or you have exceeded credits for accessing the service and the resource"),
ServerRejectReason::Forbidden => write!(f, "Access denied to the resource by any kind of reason"),
ServerRejectReason::Notfound => write!(f, "Resource not found at this time"),
ServerRejectReason::BadMode => write!(f, "The mode specified in `m` key in StreamID is not supported for this request"),
ServerRejectReason::Unacceptable => write!(f, "The requested parameters specified in SocketID cannot be satisfied for the requested resource. Also when m=publish and the data format is not acceptable"),
ServerRejectReason::Conflict => write!(f, "The resource being accessed is already locked for modification. This is in case of m=publish and the specified resource is currently read-only"),
ServerRejectReason::NotSupMedia => write!(f, "The media type is not supported by the application. This is the `t` key that specifies the media type as stream, file and auth, possibly extended by the application"),
ServerRejectReason::Locked => write!(f, "The resource being accessed is locked for any access"),
ServerRejectReason::FailedDepend => write!(f, "The request failed because it specified a dependent session ID that has been disconnected"),
ServerRejectReason::InternalServerError => write!(f, "Unexpected internal server error"),
ServerRejectReason::Unimplemented => write!(f, "The request was recognized, but the current version doesn't support it (unimplemented)"),
ServerRejectReason::Gateway => write!(f, "The server acts as a gateway and the target endpoint rejected the connection"),
ServerRejectReason::Down => write!(f, "The service has been temporarily taken over by a stub reporting this error. The real service can be down for maintenance or crashed"),
ServerRejectReason::Version => write!(f, "SRT version not supported. This might be either unsupported backward compatibility, or an upper value of a version"),
ServerRejectReason::NoRoom => write!(f, "The data stream cannot be archived due to lacking storage space. This is in case when the request type was to send a file or the live stream to be archived"),
}
}
}
#[cfg(test)]
mod test {
use bytes::BytesMut;
use super::*;
use crate::{SeqNumber, SocketId, SrtVersion};
use std::time::Duration;
use std::{convert::TryInto, io::Cursor};
#[test]
fn handshake_ser_des_test() {
let pack = ControlPacket {
timestamp: TimeStamp::from_micros(0),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber::new_truncate(1_827_131),
max_packet_size: 1500,
max_flow_size: 25600,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(1231),
syn_cookie: 0,
peer_addr: "127.0.0.1".parse().unwrap(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 0, // TODO: implement
ext_hs: Some(SrtControlPacket::HandshakeResponse(SrtHandshake {
version: SrtVersion::CURRENT,
flags: SrtShakeFlags::NAKREPORT | SrtShakeFlags::TSBPDSND,
send_latency: Duration::from_millis(3000),
recv_latency: Duration::from_millis(12345),
})),
ext_km: None,
sid: None,
}),
}),
};
let mut buf = BytesMut::with_capacity(128);
pack.serialize(&mut buf);
let des = ControlPacket::parse(&mut buf, false).unwrap();
assert!(buf.is_empty());
assert_eq!(pack, des);
}
#[test]
fn ack_ser_des_test() {
let pack = ControlPacket {
timestamp: TimeStamp::from_micros(113_703),
dest_sockid: SocketId(2_453_706_529),
control_type: ControlTypes::Ack(AckControlInfo {
ack_seq_num: 1,
ack_number: SeqNumber::new_truncate(282_049_186),
rtt: Some(TimeSpan::from_micros(10_002)),
rtt_variance: Some(TimeSpan::from_micros(1000)),
buffer_available: Some(1314),
packet_recv_rate: Some(0),
est_link_cap: Some(0),
}),
};
let mut buf = BytesMut::with_capacity(128);
pack.serialize(&mut buf);
let des = ControlPacket::parse(&mut buf, false).unwrap();
assert!(buf.is_empty());
assert_eq!(pack, des);
}
#[test]
fn ack2_ser_des_test() {
let pack = ControlPacket {
timestamp: TimeStamp::from_micros(125_812),
dest_sockid: SocketId(8313),
control_type: ControlTypes::Ack2(831),
};
assert_eq!(pack.control_type.additional_info(), 831);
let mut buf = BytesMut::with_capacity(128);
pack.serialize(&mut buf);
// dword 2 should have 831 in big endian, so the last two bytes of the second dword
assert_eq!((u32::from(buf[6]) << 8) + u32::from(buf[7]), 831);
let des = ControlPacket::parse(&mut buf, false).unwrap();
assert!(buf.is_empty());
assert_eq!(pack, des);
}
#[test]
fn raw_srt_packet_test() {
// this was taken from wireshark on a packet from stransmit that crashed
// it is an SRT reject message
let packet_data =
hex::decode("FFFF000000000000000189702BFFEFF2000103010000001E00000078").unwrap();
let packet = ControlPacket::parse(&mut Cursor::new(packet_data), false).unwrap();
assert_eq!(
packet,
ControlPacket {
timestamp: TimeStamp::from_micros(100_720),
dest_sockid: SocketId(738_193_394),
control_type: ControlTypes::Srt(SrtControlPacket::Reject)
}
)
}
#[test]
fn raw_handshake_ipv6() {
let packet_data = hex::decode("8000000000000000000002b00000000000000004000000023c3b0296000005dc00002000000000010669ead20000000000000000000000000000000001000000").unwrap();
let packet = ControlPacket::parse(&mut Cursor::new(&packet_data[..]), true).unwrap();
let r = ControlPacket {
timestamp: TimeStamp::from_micros(688),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(1010500246),
max_packet_size: 1500,
max_flow_size: 8192,
shake_type: ShakeType::Induction,
socket_id: SocketId(0x0669EAD2),
syn_cookie: 0,
peer_addr: "::1.0.0.0".parse().unwrap(),
info: HandshakeVsInfo::V4(SocketType::Datagram),
}),
};
assert_eq!(packet, r);
// reserialize it
let mut buf = vec![];
packet.serialize(&mut buf);
assert_eq!(&buf[..], &packet_data[..]);
}
#[test]
fn raw_handshake_srt() {
// this is a example HSv5 conclusion packet from the reference implementation
let packet_data = hex::decode("8000000000000000000F9EC400000000000000050000000144BEA60D000005DC00002000FFFFFFFF3D6936B6E3E405DD0100007F00000000000000000000000000010003000103010000002F00780000").unwrap();
let packet = ControlPacket::parse(&mut Cursor::new(&packet_data[..]), false).unwrap();
assert_eq!(
packet,
ControlPacket {
timestamp: TimeStamp::from_micros(1_023_684),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(1_153_345_037),
max_packet_size: 1500,
max_flow_size: 8192,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(1_030_305_462),
syn_cookie: -471_595_555,
peer_addr: "127.0.0.1".parse().unwrap(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 0,
ext_hs: Some(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::new(1, 3, 1),
flags: SrtShakeFlags::TSBPDSND
| SrtShakeFlags::TSBPDRCV
| SrtShakeFlags::HAICRYPT
| SrtShakeFlags::TLPKTDROP
| SrtShakeFlags::REXMITFLG,
send_latency: Duration::from_millis(120),
recv_latency: Duration::new(0, 0)
})),
ext_km: None,
sid: None,
})
})
}
);
// reserialize it
let mut buf = vec![];
packet.serialize(&mut buf);
assert_eq!(&buf[..], &packet_data[..]);
}
#[test]
fn raw_handshake_sid() {
// this is an example HSv5 conclusion packet from the reference implementation that has a
// stream id.
let packet_data = hex::decode("800000000000000000000b1400000000000000050000000563444b2e000005dc00002000ffffffff37eb0ee52154fbd60100007f0000000000000000000000000001000300010401000000bf0014001400050003646362616867666500006a69").unwrap();
let packet = ControlPacket::parse(&mut Cursor::new(&packet_data[..]), false).unwrap();
assert_eq!(
packet,
ControlPacket {
timestamp: TimeStamp::from_micros(2836),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(1_665_420_078),
max_packet_size: 1500,
max_flow_size: 8192,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(0x37eb0ee5),
syn_cookie: 559_217_622,
peer_addr: "127.0.0.1".parse().unwrap(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 0,
ext_hs: Some(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::new(1, 4, 1),
flags: SrtShakeFlags::TSBPDSND
| SrtShakeFlags::TSBPDRCV
| SrtShakeFlags::HAICRYPT
| SrtShakeFlags::REXMITFLG
| SrtShakeFlags::TLPKTDROP
| SrtShakeFlags::NAKREPORT
| SrtShakeFlags::FILTERCAP,
send_latency: Duration::from_millis(20),
recv_latency: Duration::from_millis(20)
})),
ext_km: None,
sid: Some(String::from("abcdefghij")),
})
})
}
);
// reserialize it
let mut buf = vec![];
packet.serialize(&mut buf);
assert_eq!(&buf[..], &packet_data[..]);
}
#[test]
fn raw_handshake_crypto() {
// this is an example HSv5 conclusion packet from the reference implementation that has crypto data embedded.
let packet_data = hex::decode("800000000000000000175E8A0000000000000005000000036FEFB8D8000005DC00002000FFFFFFFF35E790ED5D16CCEA0100007F00000000000000000000000000010003000103010000002F01F401F40003000E122029010000000002000200000004049D75B0AC924C6E4C9EC40FEB4FE973DB1D215D426C18A2871EBF77E2646D9BAB15DBD7689AEF60EC").unwrap();
let packet = ControlPacket::parse(&mut Cursor::new(&packet_data[..]), false).unwrap();
assert_eq!(
packet,
ControlPacket {
timestamp: TimeStamp::from_micros(1_531_530),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(1_877_981_400),
max_packet_size: 1_500,
max_flow_size: 8_192,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(904_368_365),
syn_cookie: 1_561_775_338,
peer_addr: "127.0.0.1".parse().unwrap(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 0,
ext_hs: Some(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::new(1, 3, 1),
flags: SrtShakeFlags::TSBPDSND
| SrtShakeFlags::TSBPDRCV
| SrtShakeFlags::HAICRYPT
| SrtShakeFlags::TLPKTDROP
| SrtShakeFlags::REXMITFLG,
send_latency: Duration::from_millis(500),
recv_latency: Duration::from_millis(500)
})),
ext_km: Some(SrtControlPacket::KeyManagerRequest(SrtKeyMessage {
pt: PacketType::KeyingMaterial,
key_flags: KeyFlags::EVEN,
keki: 0,
cipher: CipherType::Ctr,
auth: Auth::None,
salt: hex::decode("9D75B0AC924C6E4C9EC40FEB4FE973DB").unwrap(),
wrapped_keys: hex::decode(
"1D215D426C18A2871EBF77E2646D9BAB15DBD7689AEF60EC"
)
.unwrap()
})),
sid: None,
})
})
}
);
let mut buf = vec![];
packet.serialize(&mut buf);
assert_eq!(&buf[..], &packet_data[..])
}
#[test]
fn raw_handshake_crypto_pt2() {
let packet_data = hex::decode("8000000000000000000000000C110D94000000050000000374B7526E000005DC00002000FFFFFFFF18C1CED1F3819B720100007F00000000000000000000000000020003000103010000003F03E803E80004000E12202901000000000200020000000404D3B3D84BE1188A4EBDA4DA16EA65D522D82DE544E1BE06B6ED8128BF15AA4E18EC50EAA95546B101").unwrap();
let _packet = ControlPacket::parse(&mut Cursor::new(&packet_data[..]), false).unwrap();
dbg!(&_packet);
}
#[test]
fn short_ack() {
// this is a packet received from the reference implementation that crashed the parser
let packet_data =
hex::decode("800200000000000e000246e5d96d5e1a389c24780000452900007bb000001fa9")
.unwrap();
let _cp = ControlPacket::parse(&mut Cursor::new(packet_data), false).unwrap();
}
#[test]
fn test_enc_size() {
let pack = ControlPacket {
timestamp: TimeStamp::from_micros(0),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(0),
max_packet_size: 1816,
max_flow_size: 0,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(0),
syn_cookie: 0,
peer_addr: [127, 0, 0, 1].into(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 16,
ext_km: None,
ext_hs: None,
sid: None,
}),
}),
};
let mut ser = BytesMut::with_capacity(128);
pack.serialize(&mut ser);
let pack_deser = ControlPacket::parse(&mut ser, false).unwrap();
assert!(ser.is_empty());
assert_eq!(pack, pack_deser);
}
#[test]
fn test_sid() {
let pack = ControlPacket {
timestamp: TimeStamp::from_micros(0),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(0),
max_packet_size: 1816,
max_flow_size: 0,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(0),
syn_cookie: 0,
peer_addr: [127, 0, 0, 1].into(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 0,
ext_km: None,
ext_hs: None,
sid: Some("Hello hello".into()),
}),
}),
};
let mut ser = BytesMut::with_capacity(128);
pack.serialize(&mut ser);
let pack_deser = ControlPacket::parse(&mut ser, false).unwrap();
assert_eq!(pack, pack_deser);
assert!(ser.is_empty());
}
#[test]
fn test_keepalive() {
let pack = ControlPacket {
timestamp: TimeStamp::from_micros(0),
dest_sockid: SocketId(0),
control_type: ControlTypes::KeepAlive,
};
let mut ser = BytesMut::with_capacity(128);
pack.serialize(&mut ser);
let pack_deser = ControlPacket::parse(&mut ser, false).unwrap();
assert_eq!(pack, pack_deser);
assert!(ser.is_empty());
}
#[test]
fn test_reject_reason_deser_ser() {
assert_eq!(
Ok(RejectReason::Server(ServerRejectReason::Unimplemented)),
<i32 as TryInto<RejectReason>>::try_into(
RejectReason::Server(ServerRejectReason::Unimplemented).into()
)
);
}
#[test]
fn test_unordered_hs_extensions() {
//Taken from Wireshark dump of FFMPEG connection handshake
let packet_data = hex::decode(concat!(
"80000000000000000000dea800000000",
"000000050004000751dca3b8000005b8",
"00002000ffffffff025c84b8da7ee4e7",
"0100007f000000000000000000000000",
"0001000300010402000000bf003c003c",
"000500033a3a212365683d7500000078",
"00030012122029010000000002000200",
"00000408437937d8c23ce2090754c5a7",
"a9e608c14631aef7ac0b8a46b77b8c0b",
"97d4061e565dcb86e4c5cc3701e1f992",
"a5b2de3651c937c94f3333a6"
))
.unwrap();
let packet = ControlPacket::parse(&mut Cursor::new(packet_data), false).unwrap();
let reference = ControlPacket {
timestamp: TimeStamp::from_micros(57000),
dest_sockid: SocketId(0),
control_type: ControlTypes::Handshake(HandshakeControlInfo {
init_seq_num: SeqNumber(1373414328),
max_packet_size: 1464,
max_flow_size: 8192,
shake_type: ShakeType::Conclusion,
socket_id: SocketId(0x025C84B8),
syn_cookie: 0xda7ee4e7u32 as i32,
peer_addr: [127, 0, 0, 1].into(),
info: HandshakeVsInfo::V5(HsV5Info {
crypto_size: 32,
ext_hs: Some(SrtControlPacket::HandshakeRequest(SrtHandshake {
version: SrtVersion::new(1, 4, 2),
flags: SrtShakeFlags::TSBPDSND
| SrtShakeFlags::TSBPDRCV
| SrtShakeFlags::HAICRYPT
| SrtShakeFlags::TLPKTDROP
| SrtShakeFlags::NAKREPORT
| SrtShakeFlags::REXMITFLG
| SrtShakeFlags::FILTERCAP,
send_latency: Duration::from_millis(60),
recv_latency: Duration::from_millis(60)
})),
ext_km: Some(SrtControlPacket::KeyManagerRequest(SrtKeyMessage {
pt: PacketType::KeyingMaterial,
key_flags: KeyFlags::EVEN,
keki: 0,
cipher: CipherType::Ctr,
auth: Auth::None,
salt: hex::decode("437937d8c23ce2090754c5a7a9e608c1").unwrap(),
wrapped_keys: hex::decode(
"4631aef7ac0b8a46b77b8c0b97d4061e565dcb86e4c5cc3701e1f992a5b2de3651c937c94f3333a6"
)
.unwrap()
})),
sid: Some("#!::u=hex".into()),
}),
}),
};
assert_eq!(packet, reference);
}
}
| {
warn!("Handshake contains handshake extension type {} without HSREQ flag!", pack_type);
} |