file_name | prefix | suffix | middle
---|---|---|---
unix.rs | // Copyright (c) 2015-2017 Contributors as noted in the AUTHORS file.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to those terms.
use std::rc::Rc;
use std::io::Result;
use mio::{Ready, PollOpt};
use mio::unix::UnixReady;
use core::Message;
use transport::async::stub::*;
use transport::async::state::*;
use transport::async::dead::Dead;
use transport::pipe::{Event, Context};
use io_error::*;
// The pipe readiness value differs from the underlying I/O readiness:
// while an operation is in progress, it must be finished
// before another one can be started.
pub struct Active<S> {
stub: S,
can_send_msg: bool,
can_recv_msg: bool
}
impl<S : AsyncPipeStub> Active<S> {
pub fn new(s: S) -> Active<S> {
Active {
stub: s,
can_send_msg: false,
can_recv_msg: false
}
}
fn on_send_progress(&mut self, ctx: &mut dyn Context, progress: Result<bool>) -> Result<()> {
progress.map(|sent| if sent { self.on_msg_sent(ctx) } )
}
fn on_msg_sent(&mut self, ctx: &mut dyn Context) {
ctx.raise(Event::Sent);
}
fn writable_changed(&mut self, ctx: &mut dyn Context, events: Ready) -> Result<()> {
if !events.is_writable() {
return Ok(self.change_can_send(ctx, false));
}
if self.stub.has_pending_send() {
let progress = self.stub.resume_send();
return self.on_send_progress(ctx, progress);
}
if !UnixReady::from(events).is_hup() {
return Ok(self.change_can_send(ctx, true));
}
Ok(())
}
fn change_can_send(&mut self, ctx: &mut dyn Context, can_send: bool) {
if self.can_send_msg != can_send {
self.can_send_msg = can_send;
ctx.raise(Event::CanSend(can_send));
}
}
fn on_recv_progress(&mut self, ctx: &mut dyn Context, progress: Result<Option<Message>>) -> Result<()> {
progress.map(|recv| if let Some(msg) = recv { self.on_msg_received(ctx, msg) } )
}
fn on_msg_received(&mut self, ctx: &mut dyn Context, msg: Message) {
ctx.raise(Event::Received(msg));
}
fn readable_changed(&mut self, ctx: &mut dyn Context, events: Ready) -> Result<()> {
if !events.is_readable() {
return Ok(self.change_can_recv(ctx, false));
}
if self.stub.has_pending_recv() {
let progress = self.stub.resume_recv();
return self.on_recv_progress(ctx, progress);
}
if !UnixReady::from(events).is_hup() {
return Ok(self.change_can_recv(ctx, true));
}
Ok(())
}
fn change_can_recv(&mut self, ctx: &mut dyn Context, can_recv: bool) {
if self.can_recv_msg != can_recv {
self.can_recv_msg = can_recv;
ctx.raise(Event::CanRecv(can_recv));
}
}
fn hang_up_changed(&mut self, hup: bool) -> Result<()> {
if hup {
self.can_send_msg = false;
self.can_recv_msg = false;
Err(other_io_error("hup"))
} else {
Ok(())
}
}
}
fn interest() -> Ready {
let interest = Ready::readable() | Ready::writable();
let unix_interest = UnixReady::from(interest) | UnixReady::hup() | UnixReady::error();
Ready::from(unix_interest)
}
impl<S : AsyncPipeStub + 'static> PipeState<S> for Active<S> {
fn name(&self) -> &'static str {"Active"}
fn enter(&mut self, ctx: &mut dyn Context) {
ctx.reregister(self.stub.deref(), interest(), PollOpt::level());
ctx.raise(Event::Opened);
}
fn close(self: Box<Self>, ctx: &mut dyn Context) -> Box<dyn PipeState<S>> {
ctx.deregister(self.stub.deref());
Box::new(Dead)
}
fn send(mut self: Box<Self>, ctx: &mut dyn Context, msg: Rc<Message>) -> Box<dyn PipeState<S>> {
self.can_send_msg = false;
let progress = self.stub.start_send(msg);
let res = self.on_send_progress(ctx, progress);
no_transition_if_ok(self, ctx, res)
}
fn recv(mut self: Box<Self>, ctx: &mut dyn Context) -> Box<dyn PipeState<S>> {
self.can_recv_msg = false;
let progress = self.stub.start_recv();
let res = self.on_recv_progress(ctx, progress);
no_transition_if_ok(self, ctx, res)
}
fn ready(mut self: Box<Self>, ctx: &mut dyn Context, events: Ready) -> Box<dyn PipeState<S>> {
let res =
self.readable_changed(ctx, events).and_then(|_|
self.writable_changed(ctx, events).and_then(|_|
self.hang_up_changed(UnixReady::from(events).is_hup()))
);
no_transition_if_ok(self, ctx, res)
}
}
#[cfg(test)]
mod tests {
use std::rc::Rc;
use std::cell::RefCell;
use mio;
use mio::unix::UnixReady;
use core::Message;
use transport::*;
use transport::tests::*;
use transport::async::state::*;
use transport::async::tests::*;
use transport::async::active::*;
#[test]
fn on_enter_stub_is_reregistered_and_an_event_is_raised() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let mut state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
state.enter(&mut ctx);
assert_eq!(0, ctx.get_registrations().len());
assert_eq!(1, ctx.get_reregistrations().len());
assert_eq!(0, ctx.get_deregistrations());
let (ref interest, ref poll_opt) = ctx.get_reregistrations()[0];
let all = super::interest();
let level = mio::PollOpt::level();
assert_eq!(&all, interest);
assert_eq!(&level, poll_opt);
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_opened = match *evt {
pipe::Event::Opened => true,
_ => false,
};
assert!(is_opened);
}
#[test]
fn close_should_deregister_and_cause_a_transition_to_dead() {
let stub = TestStepStream::new();
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let new_state = state.close(&mut ctx);
assert_eq!(0, ctx.get_registrations().len());
assert_eq!(0, ctx.get_reregistrations().len());
assert_eq!(1, ctx.get_deregistrations());
assert_eq!("Dead", new_state.name());
}
#[test]
fn send_with_immediate_success() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let payload = vec!(66, 65, 67);
let msg = Rc::new(Message::from_body(payload));
let new_state = state.send(&mut ctx, msg);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_sent = match *evt {
pipe::Event::Sent => true,
_ => false,
};
assert!(is_sent);
}
#[test]
fn send_with_postponed_success() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
sensor.borrow_mut().set_start_send_result(Some(false));
let payload = vec!(66, 65, 67);
let msg = Rc::new(Message::from_body(payload));
let new_state = state.send(&mut ctx, msg);
assert_eq!("Active", new_state.name());
assert_eq!(0, ctx.get_raised_events().len());
sensor.borrow_mut().set_resume_send_result(Some(true));
let events = mio::Ready::writable();
let new_state = new_state.ready(&mut ctx, events);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_sent = match *evt {
pipe::Event::Sent => true,
_ => false,
};
assert!(is_sent);
}
#[test]
fn when_writable_should_raise_an_event() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let events = mio::Ready::writable();
let new_state = state.ready(&mut ctx, events);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_can_send = match *evt {
pipe::Event::CanSend(x) => x,
_ => false,
};
assert!(is_can_send);
}
#[test]
fn when_writable_should_raise_an_event_unless_no_change() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let events = mio::Ready::writable();
let new_state = state.ready(&mut ctx, events);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let new_state = new_state.ready(&mut ctx, events);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
}
#[test]
fn when_writable_hup_should_not_raise_an_event_and_transition_to_dead() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let hup = UnixReady::hup();
let writable = mio::Ready::writable();
let events = writable | mio::Ready::from(hup);
let new_state = state.ready(&mut ctx, events);
assert_eq!("Dead", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_error = match *evt {
pipe::Event::Error(_) => true,
_ => false,
};
assert!(is_error);
}
#[test]
fn recv_with_immediate_success() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let payload = vec!(66, 65, 67);
let msg = Message::from_body(payload);
sensor.borrow_mut().set_start_recv_result(Some(msg));
let new_state = state.recv(&mut ctx);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_recv = match *evt {
pipe::Event::Received(_) => true,
_ => false,
};
assert!(is_recv);
}
#[test]
fn recv_with_postponed_success() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let payload = vec!(66, 65, 67);
let msg = Message::from_body(payload);
sensor.borrow_mut().set_start_recv_result(None);
let new_state = state.recv(&mut ctx);
assert_eq!("Active", new_state.name());
assert_eq!(0, ctx.get_raised_events().len());
sensor.borrow_mut().set_resume_recv_result(Some(msg));
let events = mio::Ready::readable();
let new_state = new_state.ready(&mut ctx, events);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_recv = match *evt {
pipe::Event::Received(_) => true,
_ => false,
};
assert!(is_recv);
}
#[test]
fn when_readable_should_raise_an_event() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let events = mio::Ready::readable();
let new_state = state.ready(&mut ctx, events);
assert_eq!("Active", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_can_recv = match *evt {
pipe::Event::CanRecv(x) => x,
_ => false,
};
assert!(is_can_recv);
}
#[test]
fn when_readable_hup_should_not_raise_an_event_and_transition_to_dead() {
let sensor_srv = TestStepStreamSensor::new();
let sensor = Rc::new(RefCell::new(sensor_srv));
let stub = TestStepStream::with_sensor(sensor.clone());
let state = Box::new(Active::new(stub));
let mut ctx = TestPipeContext::new();
let readable = mio::Ready::readable();
let hup = UnixReady::hup();
let events = readable | mio::Ready::from(hup);
let new_state = state.ready(&mut ctx, events);
assert_eq!("Dead", new_state.name());
assert_eq!(1, ctx.get_raised_events().len());
let evt = &ctx.get_raised_events()[0];
let is_error = match *evt {
pipe::Event::Error(_) => true,
_ => false,
};
assert!(is_error);
} | } |
|
customJSONEncoder.py | from flask.json import JSONEncoder
from node import Node
from application import Application
class CustomJSONEncoder(JSONEncoder):
def default(self, obj): | 'node_id' : obj.node_id,
'image' : obj.image,
'ip_addr' : obj.ip_addr
}
if isinstance(obj, Application):
return {
'name' : obj.name,
'nb_nodes' : obj.nb_nodes,
'threshold' : obj.threshold,
'ratio' : obj.ratio
}
return super(CustomJSONEncoder, self).default(obj) | if isinstance(obj, Node):
return { |
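# Hedged usage sketch (not part of the original file): the encoder is usually
# registered on the Flask application so jsonify() can serialize Node and
# Application objects directly, e.g.:
#
#     from flask import Flask, jsonify
#     app = Flask(__name__)                 # hypothetical app instance
#     app.json_encoder = CustomJSONEncoder  # Flask will now use it
#     # jsonify(some_node) -> {"node_id": ..., "image": ..., "ip_addr": ...}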
semrel.go | // Package semrel processes version control data using an analyser
// function, to produce data for a release note
package semrel
import (
"fmt"
"time"
"github.com/blang/semver"
)
// BumpLevel of the release and/or individual commit
type BumpLevel int
// BumpLevel values
const (
NoBump BumpLevel = iota
BumpPatch
BumpMinor
BumpMajor
)
// ChangeAnalyzer analyzes a commit message and returns 0 or more entries to release note
type ChangeAnalyzer interface {
Analyze(commit *Commit) ([]Change, error)
}
// VCSData contains data collected from version control system
type VCSData struct {
CurrentVersion semver.Version
UnreleasedCommits []Commit
// Time of the commit being released
Time time.Time
}
// Commit contains VCS commit data
type Commit struct {
Msg string
SHA string
Time time.Time
PreReleased bool
IsMerge bool
}
// ByTime implements sort.Interface for []Commit based on Time().
type ByTime []Commit
func (a ByTime) Len() int { return len(a) }
func (a ByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByTime) Less(i, j int) bool { return a[i].Time.Before(a[j].Time) }
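// Hedged usage note (not part of the original file): since ByTime implements
// sort.Interface, commits can be ordered chronologically with the standard
// library, e.g. sort.Sort(ByTime(commits)) after importing "sort".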
// Change captures ChangeAnalyzer results
type Change interface {
Category() string
BumpLevel() BumpLevel
PreReleased() bool
}
// ReleaseData contains information for next release
type ReleaseData struct {
CurrentVersion semver.Version
NextVersion semver.Version
BumpLevel BumpLevel
Changes map[string][]Change
// Time of the commit being released
Time time.Time
}
// Release processes the release data
func Release(input *VCSData, analyzer ChangeAnalyzer) (*ReleaseData, error) {
output := &ReleaseData{
CurrentVersion: input.CurrentVersion,
NextVersion: input.CurrentVersion,
BumpLevel: NoBump,
Changes: map[string][]Change{},
Time: input.Time,
}
for _, commit := range input.UnreleasedCommits {
changes, err := analyzer.Analyze(&commit)
if err != nil {
return nil, err
}
for _, change := range changes {
if category, catOK := output.Changes[change.Category()]; catOK | else {
output.Changes[change.Category()] = []Change{change}
}
if change.BumpLevel() > output.BumpLevel {
output.BumpLevel = change.BumpLevel()
}
}
}
output.NextVersion = bump(output.CurrentVersion, output.BumpLevel)
return output, nil
}
func bump(curr semver.Version, bumpLevel BumpLevel) semver.Version {
var major uint64
var minor uint64
var patch uint64
if bumpLevel == NoBump {
return semver.MustParse(curr.String())
}
if bumpLevel == BumpMajor && curr.Major > 0 {
major = curr.Major + 1
}
if bumpLevel == BumpMinor || (curr.Major == 0 && bumpLevel == BumpMajor) {
major = curr.Major
minor = curr.Minor + 1
}
if bumpLevel == BumpPatch {
major = curr.Major
minor = curr.Minor
patch = curr.Patch + 1
}
return semver.MustParse(fmt.Sprintf("%d.%d.%d", major, minor, patch))
}
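// Hedged example (exampleBump is an addition, not part of the original
// package): it shows how bump above resolves the next version. Note the 0.x
// guard: a major bump on a 0.x version only increments the minor component.
func exampleBump() {
	fmt.Println(bump(semver.MustParse("1.2.3"), BumpMinor)) // prints 1.3.0
	fmt.Println(bump(semver.MustParse("0.4.5"), BumpMajor)) // prints 0.5.0
	fmt.Println(bump(semver.MustParse("1.2.3"), BumpMajor)) // prints 2.0.0
}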
| {
output.Changes[change.Category()] = append(category, change)
} |
model_inline_response_200_57.go | /*
* Jumpserver API Docs
*
* Jumpserver Restful api docs
*
* API version: v1
* Contact: [email protected]
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package swagger
type InlineResponse20057 struct {
Count int32 `json:"count"`
Next string `json:"next,omitempty"`
Previous string `json:"previous,omitempty"` | Results []Status `json:"results"`
} |
|
gulpfile.js | /*=============================================
= Gulp Starter by @dope =
=============================================*/
/**
*
* The packages we are using
* Not using gulp-load-plugins as it is nice to see what's here.
*
**/
var gulp = require('gulp');
var sass = require('gulp-sass');
var browserSync = require('browser-sync');
var prefix = require('gulp-autoprefixer');
var plumber = require('gulp-plumber');
var uglify = require('gulp-uglify');
var rename = require("gulp-rename");
var imagemin = require("gulp-imagemin");
var pngquant = require('imagemin-pngquant');
/**
*
* Styles
* - Compile
* - Compress/Minify
* - Catch errors (gulp-plumber)
* - Autoprefixer
*
**/
gulp.task('sass', function() {
return gulp.src('sass/**/*.scss')
.pipe(plumber()) // plumber first so Sass errors are caught instead of breaking the stream
.pipe(sass({outputStyle: 'compressed'}))
.pipe(prefix('last 2 versions', '> 1%', 'ie 8', 'Android 2', 'Firefox ESR'))
.pipe(gulp.dest('css'));
});
/**
*
* BrowserSync.io
* - Watch CSS, JS & HTML for changes
* - View project at: localhost:3000
*
**/
gulp.task('browser-sync', function() {
browserSync.init(['css/*.css', 'js/**/*.js', 'index.html'], {
server: {
baseDir: './'
}
});
});
/**
*
* Javascript
* - Uglify
*
**/
gulp.task('scripts', function() {
return gulp.src('js/*.js')
.pipe(uglify())
.pipe(rename({
dirname: "min",
suffix: ".min",
}))
.pipe(gulp.dest('js'))
});
/**
*
* Images
* - Compress them!
*
**/
gulp.task('images', function () {
return gulp.src('images/*')
.pipe(imagemin({
progressive: true,
svgoPlugins: [{removeViewBox: false}],
use: [pngquant()]
}))
.pipe(gulp.dest('images'));
});
/**
*
* Default task
* - Runs sass, browser-sync, scripts and image tasks
* - Watchs for file changes for images, scripts and sass/css
*
**/
gulp.task('default', ['sass', 'browser-sync', 'scripts', 'images'], function () { | gulp.watch('sass/**/*.scss', ['sass']);
gulp.watch('js/**/*.js', ['scripts']);
gulp.watch('images/*', ['images']);
}); |
|
contract.rs | use cosmwasm_std::{
entry_point,
has_coins,
to_binary,
BankMsg,
Binary,
Coin,
CosmosMsg,
Deps,
DepsMut,
Env,
MessageInfo,
Response,
StdError,
StdResult,
Storage,
WasmMsg,
};
use crate::{
byte_utils::{
extend_address_to_32,
ByteUtils,
},
error::ContractError,
msg::{
ExecuteMsg,
GetAddressHexResponse,
GetStateResponse,
GuardianSetInfoResponse,
InstantiateMsg,
MigrateMsg,
QueryMsg,
},
state::{
config,
config_read,
guardian_set_get,
guardian_set_set,
sequence_read,
sequence_set,
vaa_archive_add,
vaa_archive_check,
ConfigInfo,
ContractUpgrade,
GovernancePacket,
GuardianAddress,
GuardianSetInfo,
GuardianSetUpgrade,
ParsedVAA,
SetFee,
TransferFee,
},
};
use k256::{
ecdsa::{
recoverable::{
Id as RecoverableId,
Signature as RecoverableSignature,
},
Signature,
VerifyingKey,
},
EncodedPoint,
};
use sha3::{
Digest,
Keccak256,
};
use generic_array::GenericArray;
use std::convert::TryFrom;
type HumanAddr = String;
// Chain ID of Terra
const CHAIN_ID: u16 = 3;
// Lock assets fee amount and denomination
const FEE_AMOUNT: u128 = 10000;
pub const FEE_DENOMINATION: &str = "uluna";
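// Hedged note (added): "uluna" is the micro-denomination of LUNA
// (1 LUNA = 1_000_000 uluna), so the default FEE_AMOUNT of 10_000 uluna
// corresponds to the 0.01 LUNA mentioned in `instantiate` below.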
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn migrate(_deps: DepsMut, _env: Env, _msg: MigrateMsg) -> StdResult<Response> {
Ok(Response::default())
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn instantiate(
deps: DepsMut,
_env: Env,
_info: MessageInfo,
msg: InstantiateMsg,
) -> StdResult<Response> {
// Save general wormhole info
let state = ConfigInfo {
gov_chain: msg.gov_chain,
gov_address: msg.gov_address.as_slice().to_vec(),
guardian_set_index: 0,
guardian_set_expirity: msg.guardian_set_expirity,
fee: Coin::new(FEE_AMOUNT, FEE_DENOMINATION), // 0.01 Luna (or 10000 uluna) fee by default
};
config(deps.storage).save(&state)?;
// Add initial guardian set to storage
guardian_set_set(
deps.storage,
state.guardian_set_index,
&msg.initial_guardian_set,
)?;
Ok(Response::default())
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn execute(deps: DepsMut, env: Env, info: MessageInfo, msg: ExecuteMsg) -> StdResult<Response> {
match msg {
ExecuteMsg::PostMessage { message, nonce } => {
handle_post_message(deps, env, info, &message.as_slice(), nonce)
}
ExecuteMsg::SubmitVAA { vaa } => handle_submit_vaa(deps, env, info, vaa.as_slice()),
}
}
/// Process VAA message signed by guardians
fn handle_submit_vaa(
deps: DepsMut,
env: Env,
_info: MessageInfo,
data: &[u8],
) -> StdResult<Response> {
let state = config_read(deps.storage).load()?;
let vaa = parse_and_verify_vaa(deps.storage, data, env.block.time.seconds())?;
vaa_archive_add(deps.storage, vaa.hash.as_slice())?;
if state.gov_chain == vaa.emitter_chain && state.gov_address == vaa.emitter_address {
if state.guardian_set_index != vaa.guardian_set_index {
return Err(StdError::generic_err(
"governance VAAs must be signed by the current guardian set",
));
}
return handle_governance_payload(deps, env, &vaa.payload);
}
ContractError::InvalidVAAAction.std_err()
}
fn handle_governance_payload(deps: DepsMut, env: Env, data: &Vec<u8>) -> StdResult<Response> {
let gov_packet = GovernancePacket::deserialize(&data)?;
let module = String::from_utf8(gov_packet.module).unwrap();
let module: String = module.chars().filter(|c| c != &'\0').collect();
if module != "Core" {
return Err(StdError::generic_err("this is not a valid module"));
}
if gov_packet.chain != 0 && gov_packet.chain != CHAIN_ID {
return Err(StdError::generic_err(
"the governance VAA is for another chain",
));
}
match gov_packet.action {
1u8 => vaa_update_contract(deps, env, &gov_packet.payload),
2u8 => vaa_update_guardian_set(deps, env, &gov_packet.payload),
3u8 => handle_set_fee(deps, env, &gov_packet.payload),
4u8 => handle_transfer_fee(deps, env, &gov_packet.payload),
_ => ContractError::InvalidVAAAction.std_err(),
}
}
/// Parses raw VAA data into a struct and verifies whether it contains sufficient signatures of an
/// active guardian set i.e. is valid according to Wormhole consensus rules
fn parse_and_verify_vaa(
storage: &dyn Storage,
data: &[u8],
block_time: u64,
) -> StdResult<ParsedVAA> {
let vaa = ParsedVAA::deserialize(data)?;
if vaa.version != 1 {
return ContractError::InvalidVersion.std_err();
}
// Check if VAA with this hash was already accepted
if vaa_archive_check(storage, vaa.hash.as_slice()) {
return ContractError::VaaAlreadyExecuted.std_err();
}
// Load and check guardian set
let guardian_set = guardian_set_get(storage, vaa.guardian_set_index);
let guardian_set: GuardianSetInfo =
guardian_set.or_else(|_| ContractError::InvalidGuardianSetIndex.std_err())?;
if guardian_set.expiration_time != 0 && guardian_set.expiration_time < block_time {
return ContractError::GuardianSetExpired.std_err();
}
if (vaa.len_signers as usize) < guardian_set.quorum() {
return ContractError::NoQuorum.std_err();
}
// Verify guardian signatures
let mut last_index: i32 = -1;
let mut pos = ParsedVAA::HEADER_LEN;
for _ in 0..vaa.len_signers {
if pos + ParsedVAA::SIGNATURE_LEN > data.len() {
return ContractError::InvalidVAA.std_err();
}
let index = data.get_u8(pos) as i32;
if index <= last_index {
return ContractError::WrongGuardianIndexOrder.std_err();
}
last_index = index;
let signature = Signature::try_from(
&data[pos + ParsedVAA::SIG_DATA_POS
..pos + ParsedVAA::SIG_DATA_POS + ParsedVAA::SIG_DATA_LEN],
)
.or_else(|_| ContractError::CannotDecodeSignature.std_err())?;
let id = RecoverableId::new(data.get_u8(pos + ParsedVAA::SIG_RECOVERY_POS))
.or_else(|_| ContractError::CannotDecodeSignature.std_err())?;
let recoverable_signature = RecoverableSignature::new(&signature, id)
.or_else(|_| ContractError::CannotDecodeSignature.std_err())?;
let verify_key = recoverable_signature
.recover_verify_key_from_digest_bytes(GenericArray::from_slice(vaa.hash.as_slice()))
.or_else(|_| ContractError::CannotRecoverKey.std_err())?;
let index = index as usize;
if index >= guardian_set.addresses.len() {
return ContractError::TooManySignatures.std_err();
}
if !keys_equal(&verify_key, &guardian_set.addresses[index]) {
return ContractError::GuardianSignatureError.std_err();
}
pos += ParsedVAA::SIGNATURE_LEN; |
Ok(vaa)
}
fn vaa_update_guardian_set(deps: DepsMut, env: Env, data: &Vec<u8>) -> StdResult<Response> {
/* Payload format
0 uint32 new_index
4 uint8 len(keys)
5 [][20]uint8 guardian addresses
*/
let mut state = config_read(deps.storage).load()?;
let GuardianSetUpgrade {
new_guardian_set_index,
new_guardian_set,
} = GuardianSetUpgrade::deserialize(&data)?;
if new_guardian_set_index != state.guardian_set_index + 1 {
return ContractError::GuardianSetIndexIncreaseError.std_err();
}
let old_guardian_set_index = state.guardian_set_index;
state.guardian_set_index = new_guardian_set_index;
guardian_set_set(deps.storage, state.guardian_set_index, &new_guardian_set)?;
config(deps.storage).save(&state)?;
let mut old_guardian_set = guardian_set_get(deps.storage, old_guardian_set_index)?;
old_guardian_set.expiration_time = env.block.time.seconds() + state.guardian_set_expirity;
guardian_set_set(deps.storage, old_guardian_set_index, &old_guardian_set)?;
Ok(Response::new()
.add_attribute("action", "guardian_set_change")
.add_attribute("old", old_guardian_set_index.to_string())
.add_attribute("new", state.guardian_set_index.to_string()))
}
fn vaa_update_contract(_deps: DepsMut, env: Env, data: &Vec<u8>) -> StdResult<Response> {
/* Payload format
0 [][32]uint8 new_contract
*/
let ContractUpgrade { new_contract } = ContractUpgrade::deserialize(&data)?;
Ok(Response::new()
.add_message(CosmosMsg::Wasm(WasmMsg::Migrate {
contract_addr: env.contract.address.to_string(),
new_code_id: new_contract,
msg: to_binary(&MigrateMsg {})?,
}))
.add_attribute("action", "contract_upgrade"))
}
pub fn handle_set_fee(deps: DepsMut, _env: Env, data: &Vec<u8>) -> StdResult<Response> {
let set_fee_msg = SetFee::deserialize(&data)?;
// Save new fees
let mut state = config_read(deps.storage).load()?;
state.fee = set_fee_msg.fee;
config(deps.storage).save(&state)?;
Ok(Response::new()
.add_attribute("action", "fee_change")
.add_attribute("new_fee.amount", state.fee.amount.to_string())
.add_attribute("new_fee.denom", state.fee.denom.to_string()))
}
pub fn handle_transfer_fee(deps: DepsMut, _env: Env, data: &Vec<u8>) -> StdResult<Response> {
let transfer_msg = TransferFee::deserialize(&data)?;
Ok(Response::new().add_message(CosmosMsg::Bank(BankMsg::Send {
to_address: deps.api.addr_humanize(&transfer_msg.recipient)?.to_string(),
amount: vec![transfer_msg.amount],
})))
}
fn handle_post_message(
deps: DepsMut,
env: Env,
info: MessageInfo,
message: &[u8],
nonce: u32,
) -> StdResult<Response> {
let state = config_read(deps.storage).load()?;
let fee = state.fee;
// Check fee
if !has_coins(info.funds.as_ref(), &fee) {
return ContractError::FeeTooLow.std_err();
}
let emitter = extend_address_to_32(&deps.api.addr_canonicalize(&info.sender.as_str())?);
let sequence = sequence_read(deps.storage, emitter.as_slice());
sequence_set(deps.storage, emitter.as_slice(), sequence + 1)?;
Ok(Response::new()
.add_attribute("message.message", hex::encode(message))
.add_attribute("message.sender", hex::encode(emitter))
.add_attribute("message.chain_id", CHAIN_ID.to_string())
.add_attribute("message.nonce", nonce.to_string())
.add_attribute("message.sequence", sequence.to_string())
.add_attribute("message.block_time", env.block.time.seconds().to_string()))
}
#[cfg_attr(not(feature = "library"), entry_point)]
pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult<Binary> {
match msg {
QueryMsg::GuardianSetInfo {} => to_binary(&query_guardian_set_info(deps)?),
QueryMsg::VerifyVAA { vaa, block_time } => to_binary(&query_parse_and_verify_vaa(
deps,
&vaa.as_slice(),
block_time,
)?),
QueryMsg::GetState {} => to_binary(&query_state(deps)?),
QueryMsg::QueryAddressHex { address } => to_binary(&query_address_hex(deps, &address)?),
}
}
pub fn query_guardian_set_info(deps: Deps) -> StdResult<GuardianSetInfoResponse> {
let state = config_read(deps.storage).load()?;
let guardian_set = guardian_set_get(deps.storage, state.guardian_set_index)?;
let res = GuardianSetInfoResponse {
guardian_set_index: state.guardian_set_index,
addresses: guardian_set.addresses,
};
Ok(res)
}
pub fn query_parse_and_verify_vaa(
deps: Deps,
data: &[u8],
block_time: u64,
) -> StdResult<ParsedVAA> {
parse_and_verify_vaa(deps.storage, data, block_time)
}
// Returns the hex of the 32-byte address we use on this chain for the given address
pub fn query_address_hex(deps: Deps, address: &HumanAddr) -> StdResult<GetAddressHexResponse> {
Ok(GetAddressHexResponse {
hex: hex::encode(extend_address_to_32(&deps.api.addr_canonicalize(&address)?)),
})
}
pub fn query_state(deps: Deps) -> StdResult<GetStateResponse> {
let state = config_read(deps.storage).load()?;
let res = GetStateResponse { fee: state.fee };
Ok(res)
}
fn keys_equal(a: &VerifyingKey, b: &GuardianAddress) -> bool {
let mut hasher = Keccak256::new();
let point: EncodedPoint = EncodedPoint::from(a);
let point = point.decompress();
if bool::from(point.is_none()) {
return false;
}
let point = point.unwrap();
hasher.update(&point.as_bytes()[1..]);
let a = &hasher.finalize()[12..];
let b = &b.bytes;
if a.len() != b.len() {
return false;
}
for (ai, bi) in a.iter().zip(b.as_slice().iter()) {
if ai != bi {
return false;
}
}
true
} | } |
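// Hedged note (added): `keys_equal` compares the recovered secp256k1 public
// key against a guardian address the Ethereum way: hash the uncompressed
// point (minus its 0x04 prefix byte) with Keccak256 and keep the last 20
// bytes of the 32-byte digest (`[12..]`), then compare byte-for-byte.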
upload.go | package pcscommand
import (
"fmt"
"github.com/qjfoidnh/BaiduPCS-Go/baidupcs"
"github.com/qjfoidnh/BaiduPCS-Go/internal/pcsconfig"
"github.com/qjfoidnh/BaiduPCS-Go/internal/pcsfunctions/pcsupload"
"github.com/qjfoidnh/BaiduPCS-Go/pcstable"
"github.com/qjfoidnh/BaiduPCS-Go/pcsutil"
"github.com/qjfoidnh/BaiduPCS-Go/pcsutil/checksum"
"github.com/qjfoidnh/BaiduPCS-Go/pcsutil/converter"
"github.com/qjfoidnh/BaiduPCS-Go/pcsutil/taskframework"
"os"
"path"
"path/filepath"
"strings"
)
const (
// DefaultUploadMaxRetry is the default maximum number of retries after an upload failure
DefaultUploadMaxRetry = 3
)
type (
// UploadOptions holds optional settings for uploads
UploadOptions struct {
Parallel int
MaxRetry int
Load int
NoRapidUpload bool
NoSplitFile bool // disable chunked (split) upload
Policy string // policy for handling same-name files
}
)
func uploadPrintFormat(load int) string {
if load <= 1 {
return | PrintFormat
}
return "[%s] ↑ %s/%s %s/s in %s ...\n"
}
// RunRapidUpload performs a rapid (hash-based) upload; it requires the file's size, its md5, the md5 of its first 256KB slice, and its crc32
func RunRapidUpload(targetPath, contentMD5, sliceMD5, crc32 string, length int64) {
dirname := path.Dir(targetPath)
err := matchPathByShellPatternOnce(&dirname)
if err != nil {
fmt.Printf("警告: %s, 获取网盘路径 %s 错误, %s\n", baidupcs.OperationRapidUpload, dirname, err)
}
err = GetBaiduPCS().RapidUpload(targetPath, contentMD5, sliceMD5, crc32, length)
if err != nil {
fmt.Printf("%s失败, 消息: %s\n", baidupcs.OperationRapidUpload, err)
return
}
fmt.Printf("%s成功, 保存到网盘路径: %s\n", baidupcs.OperationRapidUpload, targetPath)
return
}
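// Hedged usage sketch (the path and checksum values are placeholders, not real):
//
//	RunRapidUpload(
//		"/apps/example/video.mp4",          // netdisk target path (hypothetical)
//		"d41d8cd98f00b204e9800998ecf8427e", // md5 of the whole file
//		"d41d8cd98f00b204e9800998ecf8427e", // md5 of the first 256KB slice
//		"297d6855",                         // crc32 of the file
//		1048576,                            // file length in bytes
//	)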
// RunCreateSuperFile performs the final step of a chunked upload: merging the uploaded chunks into one file
func RunCreateSuperFile(policy string, targetPath string, blockList ...string) {
err := matchPathByShellPatternOnce(&targetPath)
if err != nil {
fmt.Printf("警告: %s, 获取网盘路径 %s 错误, %s\n", baidupcs.OperationUploadCreateSuperFile, targetPath, err)
}
err = GetBaiduPCS().UploadCreateSuperFile(policy, true, targetPath, blockList...)
if err != nil {
fmt.Printf("%s失败, 消息: %s\n", baidupcs.OperationUploadCreateSuperFile, err)
return
}
fmt.Printf("%s成功, 保存到网盘路径: %s\n", baidupcs.OperationUploadCreateSuperFile, targetPath)
return
}
// RunUpload uploads the given local files
func RunUpload(localPaths []string, savePath string, opt *UploadOptions) {
if opt == nil {
opt = &UploadOptions{}
}
// validate opt
if opt.Parallel <= 0 {
opt.Parallel = pcsconfig.Config.MaxUploadParallel
}
if opt.MaxRetry < 0 {
opt.MaxRetry = DefaultUploadMaxRetry
}
if opt.Load <= 0 {
opt.Load = pcsconfig.Config.MaxUploadLoad
}
if opt.Policy != "fail" && opt.Policy != "newcopy" && opt.Policy != "overwrite" && opt.Policy != "skip" {
opt.Policy = pcsconfig.Config.UPolicy
}
err := matchPathByShellPatternOnce(&savePath)
if err != nil {
fmt.Printf("警告: 上传文件, 获取网盘路径 %s 错误, %s\n", savePath, err)
}
switch len(localPaths) {
case 0:
fmt.Printf("本地路径为空\n")
return
}
// open the record of unfinished uploads
uploadDatabase, err := pcsupload.NewUploadingDatabase()
if err != nil {
fmt.Printf("打开上传未完成数据库错误: %s\n", err)
return
}
defer uploadDatabase.Close()
var (
pcs = GetBaiduPCS()
// use the task framework
executor = &taskframework.TaskExecutor{
IsFailedDeque: true, // track failed tasks
}
subSavePath string
// statistics
statistic = &pcsupload.UploadStatistic{}
)
fmt.Print("\n")
fmt.Printf("[0] 提示: 当前上传单个文件最大并发量为: %d, 最大同时上传文件数为: %d\n", opt.Parallel, opt.Load)
statistic.StartTimer() // start timing
LoadCount := 0
for k := range localPaths {
walkedFiles, err := pcsutil.WalkDir(localPaths[k], "")
if err != nil {
fmt.Printf("警告: 遍历错误: %s\n", err)
continue
}
for k3 := range walkedFiles {
var localPathDir string
// directory separator handling for Windows
if os.PathSeparator == '\\' {
walkedFiles[k3] = pcsutil.ConvertToUnixPathSeparator(walkedFiles[k3])
localPathDir = pcsutil.ConvertToUnixPathSeparator(filepath.Dir(localPaths[k]))
} else {
localPathDir = filepath.Dir(localPaths[k])
}
// avoid stripping the leading "." of a file name
if localPathDir == "." {
localPathDir = ""
}
if len(localPaths) == 1 && len(walkedFiles) == 1 {
opt.Load = 1
}
subSavePath = strings.TrimPrefix(walkedFiles[k3], localPathDir)
if !pcsutil.ChPathLegal(walkedFiles[k3]) {
fmt.Printf("[0] %s 文件路径含有非法字符,已跳过!\n", walkedFiles[k3])
continue
}
LoadCount++
info := executor.Append(&pcsupload.UploadTaskUnit{
LocalFileChecksum: checksum.NewLocalFileChecksum(walkedFiles[k3], int(baidupcs.SliceMD5Size)),
SavePath: path.Clean(savePath + baidupcs.PathSeparator + subSavePath),
PCS: pcs,
UploadingDatabase: uploadDatabase,
Parallel: opt.Parallel,
PrintFormat: uploadPrintFormat(opt.Load),
NoRapidUpload: opt.NoRapidUpload,
NoSplitFile: opt.NoSplitFile,
UploadStatistic: statistic,
Policy: opt.Policy,
}, opt.MaxRetry)
if LoadCount >= opt.Load {
LoadCount = opt.Load
}
fmt.Printf("[%s] 加入上传队列: %s\n", info.Id(), walkedFiles[k3])
}
}
// no tasks were added
if executor.Count() == 0 {
fmt.Printf("未检测到上传的文件.\n")
return
}
// set the number of concurrently uploaded files
executor.SetParallel(LoadCount)
// run the upload tasks
executor.Execute()
fmt.Printf("\n")
fmt.Printf("上传结束, 时间: %s, 总大小: %s\n", statistic.Elapsed()/1e6*1e6, converter.ConvertFileSize(statistic.TotalSize()))
// print the list of files that failed to upload
failedList := executor.FailedDeque()
if failedList.Size() != 0 {
fmt.Printf("以下文件上传失败: \n")
tb := pcstable.NewTable(os.Stdout)
for e := failedList.Shift(); e != nil; e = failedList.Shift() {
item := e.(*taskframework.TaskInfoItem)
tb.Append([]string{item.Info.Id(), item.Unit.(*pcsupload.UploadTaskUnit).LocalFileChecksum.Path})
}
tb.Render()
}
}
| pcsupload.Default |
endpoint.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::cell::RefCell;
use std::f64::INFINITY;
use std::fmt;
use std::sync::atomic::*;
use std::sync::*;
use std::time::*;
use configuration::Configuration;
use engine::{IterOption, DATA_KEY_PREFIX_LEN, DB};
use engine_traits::{name_to_cf, CfName, SstCompressionType};
use external_storage::*;
use futures::channel::mpsc::*;
use kvproto::backup::*;
use kvproto::kvrpcpb::{Context, IsolationLevel};
use kvproto::metapb::*;
use raft::StateRole;
use raftstore::coprocessor::RegionInfoProvider;
use raftstore::store::util::find_peer;
use tikv::config::BackupConfig;
use tikv::storage::kv::{Engine, ScanMode, Snapshot};
use tikv::storage::txn::{EntryBatch, SnapshotStore, TxnEntryScanner, TxnEntryStore};
use tikv::storage::Statistics;
use tikv_util::time::Limiter;
use tikv_util::timer::Timer;
use tikv_util::worker::{Runnable, RunnableWithTimer};
use txn_types::{Key, TimeStamp};
use yatp::task::callback::{Handle, TaskCell};
use yatp::ThreadPool;
use crate::metrics::*;
use crate::*;
const WORKER_TAKE_RANGE: usize = 6;
const BACKUP_BATCH_LIMIT: usize = 1024;
// If the thread pool has been idle for this long, we shut it down.
const IDLE_THREADPOOL_DURATION: u64 = 30 * 60 * 1000; // 30 mins
#[derive(Clone)]
struct Request {
start_key: Vec<u8>,
end_key: Vec<u8>,
start_ts: TimeStamp,
end_ts: TimeStamp,
limiter: Limiter,
backend: StorageBackend,
cancel: Arc<AtomicBool>,
is_raw_kv: bool,
cf: CfName,
compression_type: CompressionType,
compression_level: i32,
}
/// Backup Task.
pub struct Task {
request: Request,
pub(crate) resp: UnboundedSender<BackupResponse>,
}
impl fmt::Display for Task {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl fmt::Debug for Task {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BackupTask")
.field("start_ts", &self.request.start_ts)
.field("end_ts", &self.request.end_ts)
.field("start_key", &hex::encode_upper(&self.request.start_key))
.field("end_key", &hex::encode_upper(&self.request.end_key))
.field("is_raw_kv", &self.request.is_raw_kv)
.field("cf", &self.request.cf)
.finish()
}
}
#[derive(Clone)]
struct LimitedStorage {
limiter: Limiter,
storage: Arc<dyn ExternalStorage>,
}
impl Task {
/// Create a backup task based on the given backup request.
pub fn new(
req: BackupRequest,
resp: UnboundedSender<BackupResponse>,
) -> Result<(Task, Arc<AtomicBool>)> {
let cancel = Arc::new(AtomicBool::new(false));
let speed_limit = req.get_rate_limit();
let limiter = Limiter::new(if speed_limit > 0 {
speed_limit as f64
} else {
INFINITY
});
let cf = name_to_cf(req.get_cf()).ok_or_else(|| crate::Error::InvalidCf {
cf: req.get_cf().to_owned(),
})?;
let task = Task {
request: Request {
start_key: req.get_start_key().to_owned(),
end_key: req.get_end_key().to_owned(),
start_ts: req.get_start_version().into(),
end_ts: req.get_end_version().into(),
backend: req.get_storage_backend().clone(),
limiter,
cancel: cancel.clone(),
is_raw_kv: req.get_is_raw_kv(),
cf,
compression_type: req.get_compression_type(),
compression_level: req.get_compression_level(),
},
resp,
};
Ok((task, cancel))
}
/// Check whether the task is canceled.
pub fn has_canceled(&self) -> bool {
self.request.cancel.load(Ordering::SeqCst)
}
}
#[derive(Debug)]
pub struct BackupRange {
start_key: Option<Key>,
end_key: Option<Key>,
region: Region,
leader: Peer,
is_raw_kv: bool,
cf: CfName,
}
impl BackupRange {
/// Get entries from the scanner and save them to storage
fn backup<E: Engine>(
&self,
writer: &mut BackupWriter,
engine: &E,
backup_ts: TimeStamp,
begin_ts: TimeStamp,
) -> Result<Statistics> {
assert!(!self.is_raw_kv);
let mut ctx = Context::default();
ctx.set_region_id(self.region.get_id());
ctx.set_region_epoch(self.region.get_region_epoch().to_owned());
ctx.set_peer(self.leader.clone());
let snapshot = match engine.snapshot(&ctx) {
Ok(s) => s,
Err(e) => {
error!(?e; "backup snapshot failed");
return Err(e.into());
}
};
let snap_store = SnapshotStore::new(
snapshot,
backup_ts,
IsolationLevel::Si,
false, /* fill_cache */
Default::default(),
false,
);
let start_key = self.start_key.clone();
let end_key = self.end_key.clone();
// Incremental backup needs to output delete records.
let incremental = !begin_ts.is_zero();
let mut scanner = snap_store
.entry_scanner(start_key, end_key, begin_ts, incremental)
.unwrap();
let start = Instant::now();
let mut batch = EntryBatch::with_capacity(BACKUP_BATCH_LIMIT);
loop {
if let Err(e) = scanner.scan_entries(&mut batch) {
error!(?e; "backup scan entries failed");
return Err(e.into());
};
if batch.is_empty() {
break;
}
debug!("backup scan entries"; "len" => batch.len());
// Build sst files.
if let Err(e) = writer.write(batch.drain(), true) {
error!(?e; "backup build sst failed");
return Err(e);
}
}
BACKUP_RANGE_HISTOGRAM_VEC
.with_label_values(&["scan"])
.observe(start.elapsed().as_secs_f64());
let stat = scanner.take_statistics();
Ok(stat)
}
fn backup_raw<E: Engine>(
&self,
writer: &mut BackupRawKVWriter,
engine: &E,
) -> Result<Statistics> {
assert!(self.is_raw_kv);
let mut ctx = Context::default();
ctx.set_region_id(self.region.get_id());
ctx.set_region_epoch(self.region.get_region_epoch().to_owned());
ctx.set_peer(self.leader.clone());
let snapshot = match engine.snapshot(&ctx) {
Ok(s) => s,
Err(e) => {
error!(?e; "backup raw kv snapshot failed");
return Err(e.into());
}
};
let start = Instant::now();
let mut statistics = Statistics::default();
let cfstatistics = statistics.mut_cf_statistics(self.cf);
let mut option = IterOption::default();
if let Some(end) = self.end_key.clone() {
option.set_upper_bound(end.as_encoded(), DATA_KEY_PREFIX_LEN);
}
let mut cursor = snapshot.iter_cf(self.cf, option, ScanMode::Forward)?;
if let Some(begin) = self.start_key.clone() {
if !cursor.seek(&begin, cfstatistics)? {
return Ok(statistics);
}
} else if !cursor.seek_to_first(cfstatistics) {
return Ok(statistics);
}
let mut batch = vec![];
loop {
while cursor.valid()? && batch.len() < BACKUP_BATCH_LIMIT {
batch.push(Ok((
cursor.key(cfstatistics).to_owned(),
cursor.value(cfstatistics).to_owned(),
)));
cursor.next(cfstatistics);
}
if batch.is_empty() {
break;
}
debug!("backup scan raw kv entries"; "len" => batch.len());
// Build sst files.
if let Err(e) = writer.write(batch.drain(..), false) {
error!(?e; "backup raw kv build sst failed");
return Err(e);
}
}
BACKUP_RANGE_HISTOGRAM_VEC
.with_label_values(&["raw_scan"])
.observe(start.elapsed().as_secs_f64());
Ok(statistics)
}
fn backup_to_file<E: Engine>(
&self,
engine: &E,
db: Arc<DB>,
storage: &LimitedStorage,
file_name: String,
backup_ts: TimeStamp,
start_ts: TimeStamp,
compression_type: Option<SstCompressionType>,
compression_level: i32,
) -> Result<(Vec<File>, Statistics)> {
let mut writer = match BackupWriter::new(
db,
&file_name,
storage.limiter.clone(),
compression_type,
compression_level,
) {
Ok(w) => w,
Err(e) => {
error!(?e; "backup writer failed");
return Err(e);
}
};
let stat = match self.backup(&mut writer, engine, backup_ts, start_ts) {
Ok(s) => s,
Err(e) => return Err(e),
};
// Save sst files to storage.
match writer.save(&storage.storage) {
Ok(files) => Ok((files, stat)),
Err(e) => {
error!(?e; "backup save file failed");
Err(e)
}
}
}
fn backup_raw_kv_to_file<E: Engine>(
&self,
engine: &E,
db: Arc<DB>,
storage: &LimitedStorage,
file_name: String,
cf: CfName,
compression_type: Option<SstCompressionType>,
compression_level: i32,
) -> Result<(Vec<File>, Statistics)> {
let mut writer = match BackupRawKVWriter::new(
db,
&file_name,
cf,
storage.limiter.clone(),
compression_type,
compression_level,
) {
Ok(w) => w,
Err(e) => {
error!(?e; "backup writer failed");
return Err(e);
}
};
let stat = match self.backup_raw(&mut writer, engine) {
Ok(s) => s,
Err(e) => return Err(e),
};
// Save sst files to storage.
match writer.save(&storage.storage) {
Ok(files) => Ok((files, stat)),
Err(e) => {
error!(?e; "backup save file failed");
Err(e)
}
}
}
}
#[derive(Clone)]
pub struct ConfigManager(Arc<RwLock<BackupConfig>>);
impl configuration::ConfigManager for ConfigManager {
fn dispatch(&mut self, change: configuration::ConfigChange) -> configuration::Result<()> {
self.0.write().unwrap().update(change);
Ok(())
}
}
#[cfg(test)]
impl ConfigManager {
fn set_num_threads(&self, num_threads: usize) {
self.0.write().unwrap().num_threads = num_threads;
}
}
/// The endpoint of backup.
///
/// It coordinates backup tasks and dispatches them to different workers.
pub struct Endpoint<E: Engine, R: RegionInfoProvider> {
store_id: u64,
pool: RefCell<ControlThreadPool>,
pool_idle_threshold: u64,
db: Arc<DB>,
config_manager: ConfigManager,
pub(crate) engine: E,
pub(crate) region_info: R,
}
/// The progress of a backup task
pub struct Progress<R: RegionInfoProvider> {
store_id: u64,
next_start: Option<Key>,
end_key: Option<Key>,
region_info: R,
finished: bool,
is_raw_kv: bool,
cf: CfName,
}
impl<R: RegionInfoProvider> Progress<R> {
fn new(
store_id: u64,
next_start: Option<Key>,
end_key: Option<Key>,
region_info: R,
is_raw_kv: bool,
cf: CfName,
) -> Self {
Progress {
store_id,
next_start,
end_key,
region_info,
finished: Default::default(),
is_raw_kv,
cf,
}
}
/// Forward the progress by at most `limit` BackupRanges
///
/// The number of returned BackupRanges is <= `limit`
fn forward(&mut self, limit: usize) -> Vec<BackupRange> {
if self.finished {
return Vec::new();
}
let store_id = self.store_id;
let (tx, rx) = mpsc::channel();
let start_key_ = self
.next_start
.clone()
.map_or_else(Vec::new, |k| k.into_encoded());
let start_key = self.next_start.clone();
let end_key = self.end_key.clone();
let raw_kv = self.is_raw_kv;
let cf_name = self.cf;
let res = self.region_info.seek_region(
&start_key_,
Box::new(move |iter| {
let mut sent = 0;
for info in iter {
let region = &info.region;
if end_key.is_some() {
let end_slice = end_key.as_ref().unwrap().as_encoded().as_slice();
if end_slice <= region.get_start_key() {
// We have reached the end.
// The range is defined as [start, end) so break if
// region start key is greater or equal to end key.
break;
}
}
if info.role == StateRole::Leader {
let ekey = get_min_end_key(end_key.as_ref(), ®ion);
let skey = get_max_start_key(start_key.as_ref(), ®ion);
assert!(!(skey == ekey && ekey.is_some()), "{:?} {:?}", skey, ekey);
let leader = find_peer(region, store_id).unwrap().to_owned();
let backup_range = BackupRange {
start_key: skey,
end_key: ekey,
region: region.clone(),
leader,
is_raw_kv: raw_kv,
cf: cf_name,
};
tx.send(backup_range).unwrap();
sent += 1;
if sent >= limit {
break;
}
}
}
}),
);
if let Err(e) = res {
// TODO: handle error.
error!(?e; "backup seek region failed");
}
let branges: Vec<_> = rx.iter().collect();
if let Some(b) = branges.last() {
// An empty region end key means it is the last
// region; we need to set the `finished` flag here in case
// we run with `next_start` set to None
if b.region.get_end_key().is_empty() || b.end_key == self.end_key {
self.finished = true;
}
self.next_start = b.end_key.clone();
} else {
self.finished = true;
}
branges
}
}
struct ControlThreadPool {
size: usize,
workers: Option<Arc<ThreadPool<TaskCell>>>,
last_active: Instant,
}
impl ControlThreadPool {
fn new() -> Self {
ControlThreadPool {
size: 0,
workers: None,
last_active: Instant::now(),
}
}
fn spawn<F>(&mut self, func: F)
where
F: FnOnce() + Send + 'static,
{
let workers = self.workers.as_ref().unwrap();
let w = workers.clone();
workers.spawn(move |_: &mut Handle<'_>| {
func();
// Debug service requires jobs in the old thread pool continue to run even after
// the pool is recreated. So the pool needs to be ref counted and dropped after
// task has finished.
drop(w);
});
}
/// Lazily adjust the thread pool's size
///
/// Resize if the thread pool needs to expand or there
/// are too many idle threads. Otherwise do nothing.
fn adjust_with(&mut self, new_size: usize) {
if self.size >= new_size && self.size - new_size <= 10 {
return;
}
let workers = Arc::new(
yatp::Builder::new(thd_name!("backup-worker"))
.max_thread_count(new_size)
.build_callback_pool(),
);
let _ = self.workers.replace(workers);
self.size = new_size;
BACKUP_THREAD_POOL_SIZE_GAUGE.set(new_size as i64);
}
fn heartbeat(&mut self) {
self.last_active = Instant::now();
}
/// Shutdown the thread pool if it has been idle for a long time.
fn check_active(&mut self, idle_threshold: Duration) {
if self.last_active.elapsed() >= idle_threshold {
self.size = 0;
if let Some(w) = self.workers.take() {
let start = Instant::now();
drop(w);
slow_log!(start.elapsed(), "backup thread pool shutdown too long");
}
}
}
}
#[test]
fn test_control_thread_pool_adjust_keep_tasks() {
use std::thread::sleep;
let counter = Arc::new(AtomicU32::new(0));
let mut pool = ControlThreadPool::new();
pool.adjust_with(3);
for i in 0..8 {
let ctr = counter.clone();
pool.spawn(move || {
sleep(Duration::from_millis(100));
ctr.fetch_or(1 << i, Ordering::SeqCst);
});
}
sleep(Duration::from_millis(150));
pool.adjust_with(4);
for i in 8..16 {
let ctr = counter.clone();
pool.spawn(move || {
sleep(Duration::from_millis(100));
ctr.fetch_or(1 << i, Ordering::SeqCst);
});
}
sleep(Duration::from_millis(250));
assert_eq!(counter.load(Ordering::SeqCst), 0xffff);
}
impl<E: Engine, R: RegionInfoProvider> Endpoint<E, R> {
pub fn new(
store_id: u64,
engine: E,
region_info: R,
db: Arc<DB>,
config: BackupConfig,
) -> Endpoint<E, R> {
Endpoint {
store_id,
engine,
region_info,
pool: RefCell::new(ControlThreadPool::new()),
pool_idle_threshold: IDLE_THREADPOOL_DURATION,
db,
config_manager: ConfigManager(Arc::new(RwLock::new(config))),
}
}
pub fn new_timer(&self) -> Timer<()> {
let mut timer = Timer::new(1);
timer.add_task(Duration::from_millis(self.pool_idle_threshold), ());
timer
}
pub fn get_config_manager(&self) -> ConfigManager {
self.config_manager.clone()
}
fn spawn_backup_worker(
&self,
prs: Arc<Mutex<Progress<R>>>,
request: Request,
tx: UnboundedSender<BackupResponse>,
) {
let start_ts = request.start_ts;
let end_ts = request.end_ts;
let backup_ts = request.end_ts;
let engine = self.engine.clone();
let db = self.db.clone();
let store_id = self.store_id;
// TODO: make it async.
self.pool.borrow_mut().spawn(move || loop {
let (branges, is_raw_kv, cf) = {
// Release lock as soon as possible.
// It is critical to speed up backup, otherwise workers are
// blocked by each other.
let mut progress = prs.lock().unwrap();
(
progress.forward(WORKER_TAKE_RANGE),
progress.is_raw_kv,
progress.cf,
)
};
if branges.is_empty() {
return;
}
// Check if we can open external storage.
let backend = match create_storage(&request.backend) {
Ok(backend) => backend,
Err(err) => {
error!(?err; "backup create storage failed");
let mut response = BackupResponse::default();
response.set_error(crate::Error::Io(err).into());
if let Err(err) = tx.unbounded_send(response) {
error!(?err; "backup failed to send response");
}
return;
}
};
let storage = LimitedStorage {
limiter: request.limiter.clone(),
storage: backend,
};
for brange in branges {
if request.cancel.load(Ordering::SeqCst) {
warn!("backup task has canceled"; "range" => ?brange);
return;
}
// TODO: make file_name unique and short
let key = brange.start_key.clone().and_then(|k| {
// use start_key sha256 instead of start_key to avoid file name too long os error
let input = if is_raw_kv {
k.into_encoded()
} else {
k.into_raw().unwrap()
};
tikv_util::file::sha256(&input).ok().map(|b| hex::encode(b))
});
let name = backup_file_name(store_id, &brange.region, key);
let ct = to_sst_compression_type(request.compression_type);
let (res, start_key, end_key) = if is_raw_kv {
(
brange.backup_raw_kv_to_file(
&engine,
db.clone(),
&storage,
name,
cf,
ct,
request.compression_level,
),
brange
.start_key
.map_or_else(|| vec![], |k| k.into_encoded()),
brange.end_key.map_or_else(|| vec![], |k| k.into_encoded()),
)
} else {
(
brange.backup_to_file(
&engine,
db.clone(),
&storage,
name,
backup_ts,
start_ts,
ct,
request.compression_level,
),
brange
.start_key
.map_or_else(|| vec![], |k| k.into_raw().unwrap()),
brange
.end_key
.map_or_else(|| vec![], |k| k.into_raw().unwrap()),
)
};
let mut response = BackupResponse::default();
match res {
Err(e) => {
error!(?e; "backup region failed";
"region" => ?brange.region,
"start_key" => hex::encode_upper(&start_key),
"end_key" => hex::encode_upper(&end_key),
);
response.set_error(e.into());
}
Ok((mut files, stat)) => {
debug!("backup region finish";
"region" => ?brange.region,
"start_key" => hex::encode_upper(&start_key),
"end_key" => hex::encode_upper(&end_key),
"details" => ?stat);
for file in files.iter_mut() {
file.set_start_key(start_key.clone());
file.set_end_key(end_key.clone());
file.set_start_version(start_ts.into_inner());
file.set_end_version(end_ts.into_inner());
}
response.set_files(files.into());
}
}
response.set_start_key(start_key);
response.set_end_key(end_key);
if let Err(e) = tx.unbounded_send(response) {
error!(?e; "backup failed to send response");
return;
}
}
});
}
pub fn handle_backup_task(&self, task: Task) {
let Task { request, resp } = task;
let is_raw_kv = request.is_raw_kv;
let start_key = if request.start_key.is_empty() {
None
} else {
// TODO: is_raw_kv is checked everywhere; this needs to be simplified.
if is_raw_kv {
Some(Key::from_encoded(request.start_key.clone()))
} else {
Some(Key::from_raw(&request.start_key.clone()))
}
};
let end_key = if request.end_key.is_empty() {
None
} else {
if is_raw_kv {
Some(Key::from_encoded(request.end_key.clone()))
} else {
Some(Key::from_raw(&request.end_key.clone()))
}
};
let prs = Arc::new(Mutex::new(Progress::new(
self.store_id,
start_key,
end_key,
self.region_info.clone(),
is_raw_kv,
request.cf,
)));
let concurrency = self.config_manager.0.read().unwrap().num_threads;
self.pool.borrow_mut().adjust_with(concurrency);
for _ in 0..concurrency {
self.spawn_backup_worker(prs.clone(), request.clone(), resp.clone());
}
}
}
impl<E: Engine, R: RegionInfoProvider> Runnable<Task> for Endpoint<E, R> {
fn run(&mut self, task: Task) {
if task.has_canceled() {
warn!("backup task has canceled"; "task" => %task);
return;
}
info!("run backup task"; "task" => %task);
self.handle_backup_task(task);
self.pool.borrow_mut().heartbeat();
}
}
impl<E: Engine, R: RegionInfoProvider> RunnableWithTimer<Task, ()> for Endpoint<E, R> {
fn on_timeout(&mut self, timer: &mut Timer<()>, _: ()) {
let pool_idle_duration = Duration::from_millis(self.pool_idle_threshold);
self.pool
.borrow_mut()
.check_active(pool_idle_duration.clone());
timer.add_task(pool_idle_duration, ());
}
}
/// Get the min end key from the given `end_key` and `Region`'s end key.
fn get_min_end_key(end_key: Option<&Key>, region: &Region) -> Option<Key> {
let region_end = if region.get_end_key().is_empty() {
None
} else {
Some(Key::from_encoded_slice(region.get_end_key()))
};
if region.get_end_key().is_empty() {
end_key.cloned()
} else if end_key.is_none() {
region_end
} else {
let end_slice = end_key.as_ref().unwrap().as_encoded().as_slice();
if end_slice < region.get_end_key() {
end_key.cloned()
} else {
region_end
}
}
}
/// Get the max start key from the given `start_key` and `Region`'s start key.
fn get_max_start_key(start_key: Option<&Key>, region: &Region) -> Option<Key> {
let region_start = if region.get_start_key().is_empty() {
None
} else {
Some(Key::from_encoded_slice(region.get_start_key()))
};
if start_key.is_none() {
region_start
} else {
let start_slice = start_key.as_ref().unwrap().as_encoded().as_slice();
if start_slice < region.get_start_key() {
region_start
} else {
start_key.cloned()
}
}
}
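// Hedged example (added) of the two helpers above: for a region covering
// ["b", "d") and a requested backup range ["a", "c"), the effective range is
// the intersection ["b", "c"):
//   get_max_start_key(Some(&Key::from_raw(b"a")), &region) == Some(region start "b")
//   get_min_end_key(Some(&Key::from_raw(b"c")), &region)   == Some(Key::from_raw(b"c"))
// (the region's start_key/end_key are assumed to hold encoded keys, as elsewhere).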
/// Construct a backup file name based on the given store id, region, range start key and local unix timestamp.
/// A name consists of five parts: store id, region id, region epoch version, the hash of the range start key and a timestamp.
/// The range start key keeps file names unique when different tables exist in the same region.
/// The local unix timestamp keeps file names unique when the same request is received again after a connection reset.
fn backup_file_name(store_id: u64, region: &Region, key: Option<String>) -> String {
let start = SystemTime::now();
let since_the_epoch = start
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
match key {
Some(k) => format!(
"{}_{}_{}_{}_{}",
store_id,
region.get_id(),
region.get_region_epoch().get_version(),
k,
since_the_epoch.as_millis()
),
None => format!(
"{}_{}_{}",
store_id,
region.get_id(),
region.get_region_epoch().get_version()
),
}
}
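#[cfg(test)]
mod backup_file_name_example {
    // Hedged sketch (this test module is an addition, not original code): it
    // illustrates the `<store>_<region>_<epoch version>` layout produced by
    // `backup_file_name` when no start-key hash is supplied.
    use super::*;

    #[test]
    fn example_layout_without_key() {
        let mut region = Region::default();
        region.set_id(42);
        region.mut_region_epoch().set_version(7);
        assert_eq!(backup_file_name(1, &region, None), "1_42_7");
    }
}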
// convert the backup request's CompressionType to the engine's SstCompressionType
fn to_sst_compression_type(ct: CompressionType) -> Option<SstCompressionType> {
match ct {
CompressionType::Lz4 => Some(SstCompressionType::Lz4),
CompressionType::Snappy => Some(SstCompressionType::Snappy),
CompressionType::Zstd => Some(SstCompressionType::Zstd),
CompressionType::Unknown => None,
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use external_storage::{make_local_backend, make_noop_backend};
use futures::executor::block_on;
use futures::stream::StreamExt;
use kvproto::metapb;
use raftstore::coprocessor::RegionCollector;
use raftstore::coprocessor::Result as CopResult;
use raftstore::coprocessor::SeekRegionCallback;
use raftstore::store::util::new_peer;
use rand;
use std::thread;
use tempfile::TempDir;
use tikv::storage::mvcc::tests::*;
use tikv::storage::{RocksEngine, TestEngineBuilder};
use tikv_util::time::Instant;
use txn_types::SHORT_VALUE_MAX_LEN;
#[derive(Clone)]
pub struct MockRegionInfoProvider {
regions: Arc<Mutex<RegionCollector>>,
cancel: Option<Arc<AtomicBool>>,
}
impl MockRegionInfoProvider {
pub fn new() -> Self {
MockRegionInfoProvider {
regions: Arc::new(Mutex::new(RegionCollector::new())),
cancel: None,
}
}
pub fn set_regions(&self, regions: Vec<(Vec<u8>, Vec<u8>, u64)>) {
let mut map = self.regions.lock().unwrap();
for (mut start_key, mut end_key, id) in regions {
if !start_key.is_empty() {
start_key = Key::from_raw(&start_key).into_encoded();
}
if !end_key.is_empty() {
end_key = Key::from_raw(&end_key).into_encoded();
}
let mut r = metapb::Region::default();
r.set_id(id);
r.set_start_key(start_key.clone());
r.set_end_key(end_key);
r.mut_peers().push(new_peer(1, 1));
map.create_region(r, StateRole::Leader);
}
}
fn canecl_on_seek(&mut self, cancel: Arc<AtomicBool>) {
self.cancel = Some(cancel);
}
}
impl RegionInfoProvider for MockRegionInfoProvider {
fn seek_region(&self, from: &[u8], callback: SeekRegionCallback) -> CopResult<()> {
let from = from.to_vec();
let regions = self.regions.lock().unwrap();
if let Some(c) = self.cancel.as_ref() {
c.store(true, Ordering::SeqCst);
}
regions.handle_seek_region(from, callback);
Ok(())
}
}
pub fn new_endpoint() -> (TempDir, Endpoint<RocksEngine, MockRegionInfoProvider>) {
let temp = TempDir::new().unwrap();
let rocks = TestEngineBuilder::new()
.path(temp.path())
.cfs(&[
engine_traits::CF_DEFAULT,
engine_traits::CF_LOCK,
engine_traits::CF_WRITE,
])
.build()
.unwrap();
let db = rocks.get_rocksdb();
(
temp,
Endpoint::new(
1,
rocks,
MockRegionInfoProvider::new(),
db,
BackupConfig { num_threads: 4 },
),
)
}
pub fn check_response<F>(rx: UnboundedReceiver<BackupResponse>, check: F)
where
F: FnOnce(Option<BackupResponse>),
{
let rx = rx.fuse();
let (resp, rx) = block_on(rx.into_future());
check(resp);
let (none, _rx) = block_on(rx.into_future());
assert!(none.is_none(), "{:?}", none);
}
#[test]
fn test_seek_range() {
let (_tmp, endpoint) = new_endpoint();
endpoint.region_info.set_regions(vec![
(b"".to_vec(), b"1".to_vec(), 1),
(b"1".to_vec(), b"2".to_vec(), 2),
(b"3".to_vec(), b"4".to_vec(), 3),
(b"7".to_vec(), b"9".to_vec(), 4),
(b"9".to_vec(), b"".to_vec(), 5),
]);
// Test seek backup range.
let test_seek_backup_range =
|start_key: &[u8], end_key: &[u8], expect: Vec<(&[u8], &[u8])>| {
let start_key = if start_key.is_empty() {
None
} else {
Some(Key::from_raw(start_key))
};
let end_key = if end_key.is_empty() {
None
} else {
Some(Key::from_raw(end_key))
};
let mut prs = Progress::new(
endpoint.store_id,
start_key,
end_key,
endpoint.region_info.clone(),
false,
engine_traits::CF_DEFAULT,
);
let mut ranges = Vec::with_capacity(expect.len());
while ranges.len() != expect.len() {
let n = (rand::random::<usize>() % 3) + 1;
let mut r = prs.forward(n);
                    // The number of returned backup ranges should be <= n.
assert!(r.len() <= n);
if r.is_empty() {
                        // if an empty vec is returned, the progress is finished
assert_eq!(
ranges.len(),
expect.len(),
"got {:?}, expect {:?}",
ranges,
expect
);
}
ranges.append(&mut r);
}
for (a, b) in ranges.into_iter().zip(expect) {
assert_eq!(
a.start_key.map_or_else(Vec::new, |k| k.into_raw().unwrap()),
b.0
);
assert_eq!(
a.end_key.map_or_else(Vec::new, |k| k.into_raw().unwrap()),
b.1
);
}
};
        // Test whether responses contain the correct ranges.
#[allow(clippy::block_in_if_condition_stmt)]
let test_handle_backup_task_range =
|start_key: &[u8], end_key: &[u8], expect: Vec<(&[u8], &[u8])>| {
let tmp = TempDir::new().unwrap();
let backend = external_storage::make_local_backend(tmp.path());
let (tx, rx) = unbounded();
let task = Task {
request: Request {
start_key: start_key.to_vec(),
end_key: end_key.to_vec(),
start_ts: 1.into(),
end_ts: 1.into(),
backend,
limiter: Limiter::new(INFINITY),
cancel: Arc::default(),
is_raw_kv: false,
cf: engine_traits::CF_DEFAULT,
compression_type: CompressionType::Unknown,
compression_level: 0,
},
resp: tx,
};
endpoint.handle_backup_task(task);
let resps: Vec<_> = block_on(rx.collect());
for a in &resps {
assert!(
expect
.iter()
.any(|b| { a.get_start_key() == b.0 && a.get_end_key() == b.1 }),
"{:?} {:?}",
resps,
expect
);
}
assert_eq!(resps.len(), expect.len());
};
        // Back up the range from case.0 to case.1;
        // case.2 is the expected result.
type Case<'a> = (&'a [u8], &'a [u8], Vec<(&'a [u8], &'a [u8])>);
let case: Vec<Case> = vec![
(b"", b"1", vec![(b"", b"1")]),
(b"", b"2", vec![(b"", b"1"), (b"1", b"2")]),
(b"1", b"2", vec![(b"1", b"2")]),
(b"1", b"3", vec![(b"1", b"2")]),
(b"1", b"4", vec![(b"1", b"2"), (b"3", b"4")]),
(b"4", b"6", vec![]),
(b"4", b"5", vec![]),
(b"2", b"7", vec![(b"3", b"4")]),
(b"7", b"8", vec![(b"7", b"8")]),
(b"3", b"", vec![(b"3", b"4"), (b"7", b"9"), (b"9", b"")]),
(b"5", b"", vec![(b"7", b"9"), (b"9", b"")]),
(b"7", b"", vec![(b"7", b"9"), (b"9", b"")]),
(b"8", b"91", vec![(b"8", b"9"), (b"9", b"91")]),
(b"8", b"", vec![(b"8", b"9"), (b"9", b"")]),
(
b"",
b"",
vec![
(b"", b"1"),
(b"1", b"2"),
(b"3", b"4"),
(b"7", b"9"),
(b"9", b""),
],
),
];
for (start_key, end_key, ranges) in case {
test_seek_backup_range(start_key, end_key, ranges.clone());
test_handle_backup_task_range(start_key, end_key, ranges);
}
}
#[test]
fn | () {
let (tmp, endpoint) = new_endpoint();
let engine = endpoint.engine.clone();
endpoint
.region_info
.set_regions(vec![(b"".to_vec(), b"5".to_vec(), 1)]);
let mut ts = TimeStamp::new(1);
let mut alloc_ts = || *ts.incr();
let mut backup_tss = vec![];
        // Write multiple versions for keys 0..9.
for len in &[SHORT_VALUE_MAX_LEN - 1, SHORT_VALUE_MAX_LEN * 2] {
for i in 0..10u8 {
let start = alloc_ts();
let commit = alloc_ts();
let key = format!("{}", i);
must_prewrite_put(
&engine,
key.as_bytes(),
&vec![i; *len],
key.as_bytes(),
start,
);
must_commit(&engine, key.as_bytes(), start, commit);
backup_tss.push((alloc_ts(), len));
}
}
// TODO: check key number for each snapshot.
let limiter = Limiter::new(10.0 * 1024.0 * 1024.0 /* 10 MB/s */);
for (ts, len) in backup_tss {
let mut req = BackupRequest::default();
req.set_start_key(vec![]);
req.set_end_key(vec![b'5']);
req.set_start_version(0);
req.set_end_version(ts.into_inner());
let (tx, rx) = unbounded();
            // Set a unique path to avoid an AlreadyExists error.
req.set_storage_backend(make_local_backend(&tmp.path().join(ts.to_string())));
if len % 2 == 0 {
req.set_rate_limit(10 * 1024 * 1024);
}
let (mut task, _) = Task::new(req, tx).unwrap();
if len % 2 == 0 {
// Make sure the rate limiter is set.
assert!(task.request.limiter.speed_limit().is_finite());
// Share the same rate limiter.
task.request.limiter = limiter.clone();
}
endpoint.handle_backup_task(task);
let (resp, rx) = block_on(rx.into_future());
let resp = resp.unwrap();
assert!(!resp.has_error(), "{:?}", resp);
let file_len = if *len <= SHORT_VALUE_MAX_LEN { 1 } else { 2 };
assert_eq!(
resp.get_files().len(),
file_len, /* default and write */
"{:?}",
resp
);
let (none, _rx) = block_on(rx.into_future());
assert!(none.is_none(), "{:?}", none);
}
}
#[test]
fn test_scan_error() {
let (tmp, endpoint) = new_endpoint();
let engine = endpoint.engine.clone();
endpoint
.region_info
.set_regions(vec![(b"".to_vec(), b"5".to_vec(), 1)]);
let mut ts: TimeStamp = 1.into();
let mut alloc_ts = || *ts.incr();
let start = alloc_ts();
let key = format!("{}", start);
must_prewrite_put(
&engine,
key.as_bytes(),
key.as_bytes(),
key.as_bytes(),
start,
);
let now = alloc_ts();
let mut req = BackupRequest::default();
req.set_start_key(vec![]);
req.set_end_key(vec![b'5']);
req.set_start_version(now.into_inner());
req.set_end_version(now.into_inner());
req.set_concurrency(4);
        // Set a unique path to avoid an AlreadyExists error.
req.set_storage_backend(make_local_backend(&tmp.path().join(now.to_string())));
let (tx, rx) = unbounded();
let (task, _) = Task::new(req.clone(), tx).unwrap();
endpoint.handle_backup_task(task);
check_response(rx, |resp| {
let resp = resp.unwrap();
assert!(resp.get_error().has_kv_error(), "{:?}", resp);
assert!(resp.get_error().get_kv_error().has_locked(), "{:?}", resp);
assert_eq!(resp.get_files().len(), 0, "{:?}", resp);
});
        // Commit the prewrite.
let commit = alloc_ts();
must_commit(&engine, key.as_bytes(), start, commit);
        // Test whether a not-leader error is correctly converted into a region error.
engine.trigger_not_leader();
let now = alloc_ts();
req.set_start_version(now.into_inner());
req.set_end_version(now.into_inner());
        // Set a unique path to avoid an AlreadyExists error.
req.set_storage_backend(make_local_backend(&tmp.path().join(now.to_string())));
let (tx, rx) = unbounded();
let (task, _) = Task::new(req, tx).unwrap();
endpoint.handle_backup_task(task);
check_response(rx, |resp| {
let resp = resp.unwrap();
assert!(resp.get_error().has_region_error(), "{:?}", resp);
assert!(
resp.get_error().get_region_error().has_not_leader(),
"{:?}",
resp
);
});
}
#[test]
fn test_cancel() {
let (temp, mut endpoint) = new_endpoint();
let engine = endpoint.engine.clone();
endpoint
.region_info
.set_regions(vec![(b"".to_vec(), b"5".to_vec(), 1)]);
let mut ts: TimeStamp = 1.into();
let mut alloc_ts = || *ts.incr();
let start = alloc_ts();
let key = format!("{}", start);
must_prewrite_put(
&engine,
key.as_bytes(),
key.as_bytes(),
key.as_bytes(),
start,
);
        // Commit the prewrite.
let commit = alloc_ts();
must_commit(&engine, key.as_bytes(), start, commit);
let now = alloc_ts();
let mut req = BackupRequest::default();
req.set_start_key(vec![]);
req.set_end_key(vec![]);
req.set_start_version(now.into_inner());
req.set_end_version(now.into_inner());
req.set_concurrency(4);
req.set_storage_backend(make_local_backend(temp.path()));
        // Cancel the task before it starts.
let (tx, rx) = unbounded();
let (task, cancel) = Task::new(req.clone(), tx).unwrap();
// Cancel the task.
cancel.store(true, Ordering::SeqCst);
endpoint.handle_backup_task(task);
check_response(rx, |resp| {
assert!(resp.is_none());
});
// Cancel the task during backup.
let (tx, rx) = unbounded();
let (task, cancel) = Task::new(req, tx).unwrap();
        endpoint.region_info.cancel_on_seek(cancel);
endpoint.handle_backup_task(task);
check_response(rx, |resp| {
assert!(resp.is_none());
});
}
#[test]
fn test_busy() {
let (_tmp, endpoint) = new_endpoint();
let engine = endpoint.engine.clone();
endpoint
.region_info
.set_regions(vec![(b"".to_vec(), b"5".to_vec(), 1)]);
let mut req = BackupRequest::default();
req.set_start_key(vec![]);
req.set_end_key(vec![]);
req.set_start_version(1);
req.set_end_version(1);
req.set_concurrency(4);
req.set_storage_backend(make_noop_backend());
let (tx, rx) = unbounded();
let (task, _) = Task::new(req, tx).unwrap();
        // Pause the engine for 6 seconds to trigger a Timeout error.
        // The Timeout error is translated into a server-is-busy error.
engine.pause(Duration::from_secs(6));
endpoint.handle_backup_task(task);
check_response(rx, |resp| {
let resp = resp.unwrap();
assert!(resp.get_error().has_region_error(), "{:?}", resp);
assert!(
resp.get_error().get_region_error().has_server_is_busy(),
"{:?}",
resp
);
});
}
#[test]
fn test_adjust_thread_pool_size() {
let (_tmp, endpoint) = new_endpoint();
endpoint
.region_info
.set_regions(vec![(b"".to_vec(), b"".to_vec(), 1)]);
let mut req = BackupRequest::default();
req.set_start_key(vec![]);
req.set_end_key(vec![]);
req.set_start_version(1);
req.set_end_version(1);
req.set_storage_backend(make_noop_backend());
let (tx, _) = unbounded();
        // expanding the thread pool is needed
endpoint.get_config_manager().set_num_threads(15);
let (task, _) = Task::new(req.clone(), tx.clone()).unwrap();
endpoint.handle_backup_task(task);
assert!(endpoint.pool.borrow().size == 15);
// shrink thread pool only if there are too many idle threads
endpoint.get_config_manager().set_num_threads(10);
let (task, _) = Task::new(req.clone(), tx.clone()).unwrap();
endpoint.handle_backup_task(task);
assert!(endpoint.pool.borrow().size == 15);
endpoint.get_config_manager().set_num_threads(3);
let (task, _) = Task::new(req, tx).unwrap();
endpoint.handle_backup_task(task);
assert!(endpoint.pool.borrow().size == 3);
}
#[test]
fn test_thread_pool_shutdown_when_idle() {
let (_, mut endpoint) = new_endpoint();
// set the idle threshold to 100ms
endpoint.pool_idle_threshold = 100;
let mut backup_timer = endpoint.new_timer();
let endpoint = Arc::new(Mutex::new(endpoint));
let scheduler = {
let endpoint = endpoint.clone();
let (tx, rx) = tikv_util::mpsc::unbounded();
thread::spawn(move || loop {
let tick_time = backup_timer.next_timeout().unwrap();
let timeout = tick_time.checked_sub(Instant::now()).unwrap_or_default();
let task = match rx.recv_timeout(timeout) {
Ok(Some(task)) => Some(task),
_ => None,
};
if let Some(task) = task {
let mut endpoint = endpoint.lock().unwrap();
endpoint.run(task);
}
endpoint.lock().unwrap().on_timeout(&mut backup_timer, ());
});
tx
};
let mut req = BackupRequest::default();
req.set_start_key(vec![]);
req.set_end_key(vec![]);
req.set_start_version(1);
req.set_end_version(1);
req.set_storage_backend(make_noop_backend());
endpoint
.lock()
.unwrap()
.get_config_manager()
.set_num_threads(10);
let (tx, resp_rx) = unbounded();
let (task, _) = Task::new(req, tx).unwrap();
        // the thread pool stays empty until the first task arrives
assert_eq!(endpoint.lock().unwrap().pool.borrow().size, 0);
scheduler.send(Some(task)).unwrap();
        // wait until the task finishes
let _ = block_on(resp_rx.into_future());
assert_eq!(endpoint.lock().unwrap().pool.borrow().size, 10);
        // the thread pool has not shut down yet
thread::sleep(Duration::from_millis(50));
assert_eq!(endpoint.lock().unwrap().pool.borrow().size, 10);
        // the thread pool shuts down if no task arrives for 100ms
thread::sleep(Duration::from_millis(50));
assert_eq!(endpoint.lock().unwrap().pool.borrow().size, 0);
}
// TODO: region err in txn(engine(request))
}
| test_handle_backup_task |
mod.rs | // Copyright 2021 The Engula Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod kernel;
mod update_reader;
mod update_writer;
pub use self::{kernel::Kernel, mem::Kernel as MemKernel};
mod mem {
use engula_journal::MemJournal;
use engula_storage::MemStorage;
use crate::Result;
pub type Kernel = super::Kernel<MemJournal, MemStorage>;
impl Kernel {
pub async fn open() -> Result<Self> {
let journal = MemJournal::default();
let storage = MemStorage::default();
Self::init(journal, storage).await
} | }
} |
|
TheDart101.py | import sys
import math
TARGET_SCORE = 101
def | (shoots):
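    """Simulate one player's shots and return the round in which the score
    reaches exactly TARGET_SCORE, or -1 if it is never reached.

    Rules as implemented below: a miss ('X') costs 20 points, plus 10 more if
    the previous throw in the round also missed, and three misses in one round
    reset the score to 0. A shot 'a*b' scores a * b points. Overshooting
    TARGET_SCORE ends the round and reverts the score to its value at the end
    of the previous round.
    """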
rounds, throws, misses, score = 1, 0, 0, 0
prev_round_score = 0
prev_shot = ''
for shot in shoots.split():
throws += 1
if 'X' in shot:
misses += 1
score -= 20
if prev_shot == 'X': score -= 10
if misses == 3: score = 0
if throws == 3:
throws = 0
rounds += 1
misses = 0
prev_shot = ''
prev_round_score = score
else:
prev_shot = shot
else:
if '*' in shot:
a, b = map(int, shot.split('*'))
points = a * b
else:
points = int(shot)
if score + points == TARGET_SCORE:
return rounds
elif score + points > TARGET_SCORE:
throws = 3
score = prev_round_score
else:
score += points
if throws == 3:
throws = 0
rounds += 1
misses = 0
prev_shot = ''
prev_round_score = score
else:
prev_shot = shot
return -1
num_players = int(input())
player_names = [input() for _ in range(num_players)]
shortest_rounds = math.inf
winner = ''
for i in range(num_players):
shoots = input()
rounds = simulate(shoots)
if rounds != -1 and rounds < shortest_rounds:
shortest_rounds = rounds
winner = player_names[i]
print(winner)
| simulate |
layout-thin.js | import { h } from 'vue'
export default {
name: "LayoutThin", | render() {
return h(
"svg",
{"xmlns":"http://www.w3.org/2000/svg","viewBox":"0 0 256 256","class":"v-icon","fill":"currentColor","data-name":"ph-layout-thin","innerHTML":" <rect width='256' height='256' fill='none'/> <line x1='104' y1='104' x2='104' y2='208' fill='none' stroke='#000' stroke-linecap='round' stroke-linejoin='round' stroke-width='8'/> <line x1='32' y1='104' x2='224' y2='104' fill='none' stroke='#000' stroke-linecap='round' stroke-linejoin='round' stroke-width='8'/> <rect x='32' y='48' width='192' height='160' rx='8' stroke-width='8' stroke='#000' stroke-linecap='round' stroke-linejoin='round' fill='none'/>"},
)
}
} | vendor: "Ph",
type: "",
tags: ["layout","thin"], |
index.ts | import {
addi,
addr,
bani,
banr,
bori,
borr,
eqir,
eqri, | muli,
mulr,
OpCode,
seti,
setr,
} from '../../day16-chronal-classification/lib';
const ipReg = /^#ip\s(\d+)$/;
const progReg = /^(\w+)\s(\d+)\s(\d+)\s(\d+)$/;
const operationMap: {
[opCode: string]: OpCode,
} = {
addi,
addr,
bani,
banr,
bori,
borr,
eqir,
eqri,
eqrr,
gtir,
gtri,
gtrr,
muli,
mulr,
seti,
setr,
};
export function parseIp(str: string | undefined) {
const match = ipReg.exec(str || '');
if (!match) {
throw new Error(`Invalid ip input ${str}`);
}
return +match[1];
}
export function parseProg(str: string) {
const match = progReg.exec(str || '');
if (!match) {
throw new Error(`Invalid program input ${str}`);
}
const op = operationMap[match[1]];
if (!op) {
throw new Error(`No op found ${match[1]}`);
}
return op.instruction(...match.slice(1).map((m) => +m));
}
export default (input: string[], registerStart = 0) => {
const ip = parseIp(input.slice(0).shift());
const programMap = input.slice(1).map(parseProg);
const register = [registerStart, 0, 0, 0, 0, 0];
let i = register[ip];
let instruction = programMap[i];
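  // Run the program, mirroring the bound instruction-pointer register before
  // and after each instruction, until the program-specific break below.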
while (instruction) {
register[ip] = i;
instruction(register);
i = register[ip];
i++;
instruction = programMap[i];
if (i === 1) {
      // Program-specific part 1: once instruction 1 is reached, the target number is known
break;
}
}
  // Program-specific part 2: the target number is in register 1; sum its divisors
const largeNumber = register[1];
let sum = 0;
for (i = 1; i <= largeNumber; i++) {
if (largeNumber % i === 0) {
sum += i;
}
}
return sum;
}; | eqrr,
gtir,
gtri,
gtrr, |
firebaseui-angular-library.component.ts | import { Component, EventEmitter, Inject, Input, NgZone, OnChanges, OnDestroy, OnInit, Optional, Output, SimpleChanges } from '@angular/core';
import { AngularFireAuth } from '@angular/fire/compat/auth';
import firebase from 'firebase/compat/app';
import 'firebase/compat/auth';
import * as firebaseui from 'firebaseui';
import { Subscription } from 'rxjs';
import {
ExtendedFirebaseUIAuthConfig,
FirebaseUISignInFailure,
FirebaseUISignInSuccessWithAuthResult
} from './firebaseui-angular-library.helper';
import { FirebaseuiAngularLibraryService } from './firebaseui-angular-library.service';
import User = firebase.User;
import UserCredential = firebase.auth.UserCredential;
import AuthUI = firebaseui.auth.AuthUI;
@Component({
selector: 'firebase-ui',
template: '<div id="firebaseui-auth-container"></div>'
})
export class | implements OnInit, OnDestroy, OnChanges {
private static readonly COMPUTED_CALLBACKS = 'COMPUTED_CALLBACKS';
private firebaseUISubscription: Subscription;
@Optional() @Input("language") language: string;
@Output('signInSuccessWithAuthResult') signInSuccessWithAuthResultCallback: EventEmitter<FirebaseUISignInSuccessWithAuthResult> = new EventEmitter(); // tslint:disable-line
@Output('signInFailure') signInFailureCallback: EventEmitter<FirebaseUISignInFailure> = new EventEmitter(); // tslint:disable-line
@Output('uiShown') uiShownCallback: EventEmitter<void> = new EventEmitter(); // tslint:disable-line
private subscription: Subscription;
constructor(private angularFireAuth: AngularFireAuth,
@Inject('firebaseUIAuthConfig') private _firebaseUiConfig: ExtendedFirebaseUIAuthConfig,
@Inject('firebaseUIAuthConfigFeature') private _firebaseUiConfig_Feature: ExtendedFirebaseUIAuthConfig,
private ngZone: NgZone,
private firebaseUIService: FirebaseuiAngularLibraryService) {
this.firebaseUISubscription = this.firebaseUIService.getFirebaseUiObservable().subscribe((fireUIInstance: AuthUI) => {
this.firebaseUIPopup(fireUIInstance);
});
}
async ngOnChanges(changes: SimpleChanges) {
await this.firebaseUIService.setLanguage(changes.language.currentValue);
}
get firebaseUiConfig(): ExtendedFirebaseUIAuthConfig {
return {
...this._firebaseUiConfig,
...this._firebaseUiConfig_Feature
};
}
ngOnInit(): void {
this.subscription = this.angularFireAuth.authState.subscribe((value: User) => {
if ((value && value.isAnonymous) || !value) {
if (this.firebaseUiConfig.signInOptions.length !== 0) {
          // ngOnChanges only runs when a language value is bound as an @Input; set the language manually if it is not
if (!this.language) {
this.firebaseUIService.setLanguage('en');
}
} else {
throw new Error('There must be at least one AuthProvider.');
}
}
});
}
ngOnDestroy(): void {
if (!!this.subscription) {
this.subscription.unsubscribe();
}
if (!!this.firebaseUISubscription) {
this.firebaseUISubscription.unsubscribe();
}
}
private getUIAuthConfig(): ExtendedFirebaseUIAuthConfig {
if (!this.firebaseUiConfig.callbacks) {
this._firebaseUiConfig[FirebaseuiAngularLibraryComponent.COMPUTED_CALLBACKS] = true;
this._firebaseUiConfig.callbacks = this.getCallbacks();
}
return this.firebaseUiConfig;
}
private firebaseUIPopup(firebaseUiInstance: AuthUI) {
const uiAuthConfig = this.getUIAuthConfig();
    // Check whether the callbacks were computed, so they can be reset after being provided to firebaseui.
    // Necessary to allow updating the firebaseui config at runtime.
let resetCallbacks = false;
if (uiAuthConfig[FirebaseuiAngularLibraryComponent.COMPUTED_CALLBACKS]) {
resetCallbacks = true;
delete uiAuthConfig[FirebaseuiAngularLibraryComponent.COMPUTED_CALLBACKS];
}
delete uiAuthConfig.language;
// show the firebaseui
firebaseUiInstance.start('#firebaseui-auth-container', uiAuthConfig);
if (resetCallbacks) {
this._firebaseUiConfig.callbacks = null;
}
}
private getCallbacks(): any { // firebaseui.Callbacks
const signInSuccessWithAuthResultCallback = (authResult: UserCredential, redirectUrl: string) => {
this.ngZone.run(() => {
this.signInSuccessWithAuthResultCallback.emit({
authResult,
redirectUrl
});
});
return this.firebaseUiConfig.signInSuccessUrl;
};
const signInFailureCallback = (error: firebaseui.auth.AuthUIError) => {
this.ngZone.run(() => {
this.signInFailureCallback.emit({
code: error.code,
credential: error.credential
});
});
return Promise.reject();
};
const uiShownCallback = () => {
this.ngZone.run(() => {
this.uiShownCallback.emit();
});
};
return {
signInSuccessWithAuthResult: signInSuccessWithAuthResultCallback,
signInFailure: signInFailureCallback,
uiShown: uiShownCallback
};
}
}
| FirebaseuiAngularLibraryComponent |
round1.go | package keygen
import (
"crypto/rand"
"errors"
"github.com/cronokirby/safenum"
"github.com/Zondax/multi-party-sig/internal/round"
"github.com/Zondax/multi-party-sig/internal/types"
"github.com/Zondax/multi-party-sig/pkg/hash"
"github.com/Zondax/multi-party-sig/pkg/math/curve"
"github.com/Zondax/multi-party-sig/pkg/math/polynomial"
"github.com/Zondax/multi-party-sig/pkg/math/sample"
"github.com/Zondax/multi-party-sig/pkg/paillier"
"github.com/Zondax/multi-party-sig/pkg/party"
zksch "github.com/Zondax/multi-party-sig/pkg/zk/sch"
)
var _ round.Round = (*round1)(nil)
type round1 struct {
*round.Helper
// PreviousSecretECDSA = sk'ᵢ
// Contains the previous secret ECDSA key share which is being refreshed
// Keygen: sk'ᵢ = nil
// Refresh: sk'ᵢ = sk'ᵢ
PreviousSecretECDSA curve.Scalar
// PreviousPublicSharesECDSA[j] = pk'ⱼ
// Keygen: pk'ⱼ = nil
// Refresh: pk'ⱼ = pk'ⱼ
PreviousPublicSharesECDSA map[party.ID]curve.Point
// PreviousChainKey contains the chain key, if we're refreshing
//
// In that case, we will simply use the previous chain key at the very end.
PreviousChainKey types.RID
// VSSSecret = fᵢ(X)
// Polynomial from which the new secret shares are computed.
// Keygen: fᵢ(0) = xⁱ
// Refresh: fᵢ(0) = 0
VSSSecret *polynomial.Polynomial
}
// VerifyMessage implements round.Round.
func (r *round1) VerifyMessage(round.Message) error { return nil }
// StoreMessage implements round.Round.
func (r *round1) StoreMessage(round.Message) error { return nil }
// Finalize implements round.Round
//
// - sample Paillier (pᵢ, qᵢ)
// - sample Pedersen Nᵢ, sᵢ, tᵢ
// - sample aᵢ <- 𝔽
// - set Aᵢ = aᵢ⋅G
// - compute Fᵢ(X) = fᵢ(X)⋅G
// - sample ridᵢ <- {0,1}ᵏ
// - sample cᵢ <- {0,1}ᵏ
// - commit to message.
func (r *round1) Finalize(out chan<- *round.Message) (round.Session, error) {
// generate Paillier and Pedersen
PaillierSecret := paillier.NewSecretKey(nil)
SelfPaillierPublic := PaillierSecret.PublicKey | // save our own share already so we are consistent with what we receive from others
SelfShare := r.VSSSecret.Evaluate(r.SelfID().Scalar(r.Group()))
// set Fᵢ(X) = fᵢ(X)•G
SelfVSSPolynomial := polynomial.NewPolynomialExponent(r.VSSSecret)
// generate Schnorr randomness
SchnorrRand := zksch.NewRandomness(rand.Reader, r.Group(), nil)
// Sample RIDᵢ
SelfRID, err := types.NewRID(rand.Reader)
if err != nil {
return r, errors.New("failed to sample Rho")
}
chainKey, err := types.NewRID(rand.Reader)
if err != nil {
return r, errors.New("failed to sample c")
}
// commit to data in message 2
SelfCommitment, Decommitment, err := r.HashForID(r.SelfID()).Commit(
SelfRID, chainKey, SelfVSSPolynomial, SchnorrRand.Commitment(), ElGamalPublic,
SelfPedersenPublic.N(), SelfPedersenPublic.S(), SelfPedersenPublic.T())
if err != nil {
return r, errors.New("failed to commit")
}
// should be broadcast but we don't need that here
msg := &broadcast2{Commitment: SelfCommitment}
err = r.BroadcastMessage(out, msg)
if err != nil {
return r, err
}
nextRound := &round2{
round1: r,
VSSPolynomials: map[party.ID]*polynomial.Exponent{r.SelfID(): SelfVSSPolynomial},
Commitments: map[party.ID]hash.Commitment{r.SelfID(): SelfCommitment},
RIDs: map[party.ID]types.RID{r.SelfID(): SelfRID},
ChainKeys: map[party.ID]types.RID{r.SelfID(): chainKey},
ShareReceived: map[party.ID]curve.Scalar{r.SelfID(): SelfShare},
ElGamalPublic: map[party.ID]curve.Point{r.SelfID(): ElGamalPublic},
PaillierPublic: map[party.ID]*paillier.PublicKey{r.SelfID(): SelfPaillierPublic},
NModulus: map[party.ID]*safenum.Modulus{r.SelfID(): SelfPedersenPublic.N()},
S: map[party.ID]*safenum.Nat{r.SelfID(): SelfPedersenPublic.S()},
T: map[party.ID]*safenum.Nat{r.SelfID(): SelfPedersenPublic.T()},
ElGamalSecret: ElGamalSecret,
PaillierSecret: PaillierSecret,
PedersenSecret: PedersenSecret,
SchnorrRand: SchnorrRand,
Decommitment: Decommitment,
}
return nextRound, nil
}
// PreviousRound implements round.Round.
func (round1) PreviousRound() round.Round { return nil }
// MessageContent implements round.Round.
func (round1) MessageContent() round.Content { return nil }
// Number implements round.Round.
func (round1) Number() round.Number { return 1 } | SelfPedersenPublic, PedersenSecret := PaillierSecret.GeneratePedersen()
ElGamalSecret, ElGamalPublic := sample.ScalarPointPair(rand.Reader, r.Group())
|
integration_test.go | package integration
import (
msg_parser "github.com/kaifei-bianjie/msg-parser"
"github.com/stretchr/testify/suite"
"testing"
)
type IntegrationTestSuite struct {
msg_parser.MsgClient
suite.Suite
}
type SubTest struct {
testName string
testCase func(s IntegrationTestSuite)
}
func TestSuite(t *testing.T) |
func (s *IntegrationTestSuite) SetupSuite() {
s.MsgClient = msg_parser.NewMsgClient()
}
| {
suite.Run(t, new(IntegrationTestSuite))
} |
gcodeServer.py | import io
import os, sys
import threading
import numpy as np
import base64, string
from PIL import Image
from threading import Lock
from flask import url_for, Flask, request, redirect, render_template, send_from_directory, jsonify
import gcodeCompare
app = Flask(__name__, template_folder='templates')
lock = Lock()
@app.route('/')
def web_form():
return render_template('web_form.html')
@app.route('/GcodeViewer_server_version.html')
def gcodeViewer():
return render_template('GcodeViewer_server_version.html')
@app.route('/camera1_server_version.html')
def camera1():
return render_template('camera1_server_version.html')
@app.route('/camera2_server_version.html')
def camera2():
return render_template('camera2_server_version.html')
@app.route('/upload', methods=['POST'])
def upload():
imageURL = request.form.get('dataURL', '')
rotate_frequency = request.form.get('rotate_frequency', '')
image_position_data_x = request.form.get('camera_position_x', '')
image_position_data_y = request.form.get('camera_position_y', '')
image_position_data_z = request.form.get('camera_position_z', '')
pil_img = Image.open(io.BytesIO(base64.b64decode(imageURL)))
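    # Serialize calls into gcodeCompare so concurrent uploads do not
    # interleave their image comparisons.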
lock.acquire()
gcodeCompare.image_compare(pil_img, rotate_frequency, image_position_data_x, image_position_data_y, image_position_data_z)
lock.release()
    return 'upload success!'
@app.route('/rotate', methods=['GET'])
def rotate():
camera_position_list = gcodeCompare.get_image_compare_list()
print('rotate flask: ' + str(camera_position_list))
rotate_data_flask = {
'rotate_time': camera_position_list[0][0],
'position_x' : camera_position_list[0][1],
'position_y' : camera_position_list[0][2],
'position_z' : camera_position_list[0][3]
}
return jsonify(rotate_data_flask)
def gcodeImage_filename(upload_Time):
|
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=8888)
lock = threading.Lock() | filename = 'gcodeImage_' + str(upload_Time) + '.jpg'
return filename |
datasource.ts | import { Dictionary } from '@salesforce/ts-types';
import {
SteedosDriver,
SteedosMongoDriver,
SteedosMeteorMongoDriver,
SteedosSqlite3Driver,
SteedosSqlServerDriver,
SteedosPostgresDriver,
SteedosOracleDriver,
SteedosMySqlDriver
} from '../driver';
import _ = require('lodash');
import { SteedosQueryOptions, SteedosQueryFilters } from './query';
import {
SteedosIDType,
SteedosObjectType,
SteedosObjectTypeConfig,
SteedosSchema,
SteedosObjectPermissionTypeConfig,
SteedosObjectPermissionType,
getAppConfigs,
getDashboardConfigs,
getSteedosSchema
} from '.';
import { SteedosDriverConfig } from '../driver';
import { buildGraphQLSchema } from '../graphql';
import { GraphQLSchema } from 'graphql';
import { getObjectConfigs, addObjectConfig, addAllConfigFiles } from '.';
let Fiber = require('fibers');
var path = require('path')
export enum SteedosDatabaseDriverType {
Mongo = 'mongo',
MeteorMongo = 'meteor-mongo',
Sqlite = 'sqlite',
SqlServer = 'sqlserver',
Postgres = 'postgres',
Oracle = 'oracle',
MySql = 'mysql'
}
export type SteedosDataSourceTypeConfig = {
name?: string
driver: SteedosDatabaseDriverType | string | SteedosDriver
logging?: boolean | Array<any>
url?: string
host?: string,
port?: number,
username?: string
password?: string,
database?: string,
connectString?: string,
timezone?: string,
options?: any
objects?: Dictionary<SteedosObjectTypeConfig>
objectFiles?: string[]
objectsRolesPermission?: Dictionary<Dictionary<SteedosObjectPermissionTypeConfig>>
    getRoles?: Function // TODO: this feature is not yet available
enable_space?: boolean
    locale?: string // Default language (e.g. zh) for newly created database tables; used for the default sort order of fields
}
export class SteedosDataSourceType implements Dictionary {
[key: string]: unknown;
private _name: string;
public get name(): string {
return this._name;
}
private _adapter: SteedosDriver;
public get adapter(): SteedosDriver {
return this._adapter;
}
private _getRoles: Function;
private _url: string;
private _host: string;
private _port: number;
private _username?: string;
private _password?: string;
private _database?: string;
private _connectString?: string;
private _timezone?: string;
private _options?: any;
private _locale?: string;
private _schema: SteedosSchema;
private _objects: Dictionary<SteedosObjectType> = {};
private _objectsConfig: Dictionary<SteedosObjectTypeConfig> = {};
private _objectsRolesPermission: Dictionary<Dictionary<SteedosObjectPermissionType>> = {};
private _objectsSpaceRolesPermission: Dictionary<Dictionary<Dictionary<SteedosObjectPermissionType>>> = {};
private _driver: SteedosDatabaseDriverType | string | SteedosDriver;
private _logging: boolean | Array<any>;
private _graphQLSchema: GraphQLSchema;
private _config: SteedosDataSourceTypeConfig;
private _enable_space: boolean;
public get enable_space(): boolean {
return this._enable_space;
}
public get config(): SteedosDataSourceTypeConfig {
return this._config;
}
public set config(value: SteedosDataSourceTypeConfig) {
this._config = value;
}
public get driver(): SteedosDatabaseDriverType | string | SteedosDriver {
return this._driver;
}
getObjects() {
return this._objects
}
getObject(name: string) {
return this._objects[name]
}
getObjectsConfig() {
return this._objectsConfig;
}
setObject(object_name: string, objectConfig: SteedosObjectTypeConfig) {
let object = new SteedosObjectType(object_name, this, objectConfig)
this._objectsConfig[object_name] = objectConfig;
this._objects[object_name] = object;
}
removeObject(object_name: string){
delete this._objectsConfig[object_name];
delete this._objects[object_name];
this.schema.removeObjectMap(object_name);
}
initDriver() {
let driverConfig: SteedosDriverConfig = {
url: this._url,
host: this._host,
port: this._port,
username: this._username,
password: this._password,
database: this._database,
connectString: this._connectString,
timezone: this._timezone,
options: this._options,
logging: this._logging,
locale: this._locale
}
if (_.isString(this.config.driver)) {
switch (this.config.driver) {
case SteedosDatabaseDriverType.Mongo:
this._adapter = new SteedosMongoDriver(driverConfig);
break;
case SteedosDatabaseDriverType.MeteorMongo:
this._adapter = new SteedosMeteorMongoDriver(driverConfig);
break;
case SteedosDatabaseDriverType.Sqlite:
this._adapter = new SteedosSqlite3Driver(driverConfig);
break;
case SteedosDatabaseDriverType.SqlServer:
this._adapter = new SteedosSqlServerDriver(driverConfig);
break;
case SteedosDatabaseDriverType.Postgres:
this._adapter = new SteedosPostgresDriver(driverConfig);
break;
case SteedosDatabaseDriverType.Oracle:
this._adapter = new SteedosOracleDriver(driverConfig);
break;
case SteedosDatabaseDriverType.MySql:
this._adapter = new SteedosMySqlDriver(driverConfig);
break;
default:
throw new Error(`the driver ${this.config.driver} is not supported`)
}
} else {
this._adapter = this.config.driver
}
}
initObjects(){
        // Load all objects belonging to this datasource from the cache into the datasource
let objects: Array<SteedosObjectTypeConfig> = getObjectConfigs(this._name);
_.each(objects, (object) => {
this.setObject(object.name, object);
});
_.each(this.config.objectsRolesPermission, (objectRolesPermission, object_name) => {
_.each(objectRolesPermission, (objectRolePermission, role_name) => {
objectRolePermission.name = role_name
this.setObjectPermission(object_name, objectRolePermission)
})
})
}
constructor(datasource_name: string, config: SteedosDataSourceTypeConfig, schema: SteedosSchema) {
this._name = datasource_name
this.config = config
this._url = config.url
this._host = config.host
this._port = config.port
this._username = config.username
this._password = config.password
this._database = config.database
this._connectString = config.connectString
this._timezone = config.timezone
this._options = config.options
this._schema = schema
this._driver = config.driver
this._logging = config.logging
this._locale = config.locale
if(_.has(config, 'enable_space')){
this._enable_space = config.enable_space
}else{
if(this._driver == SteedosDatabaseDriverType.MeteorMongo || this._driver == SteedosDatabaseDriverType.Mongo){
this._enable_space = true
}else{
this._enable_space = false
}
}
this.initDriver();
        // Add the configured objects to the cache
_.each(this.config.objects, (object, object_name) => {
object.name = object_name
addObjectConfig(object, this._name);
})
        // Add objects from config files to the cache
_.each(this.config.objectFiles, (objectPath) => {
let filePath = objectPath;
if(!path.isAbsolute(objectPath)){
filePath = path.join(process.cwd(), objectPath)
}
addAllConfigFiles(filePath, this._name)
})
if (config.getRoles && !_.isFunction(config.getRoles)) {
throw new Error('getRoles must be a function')
}
this._getRoles = config.getRoles
}
setObjectPermission(object_name: string, objectRolePermission: SteedosObjectPermissionTypeConfig) {
let objectPermissions = this._objectsRolesPermission[object_name]
if (!objectPermissions) {
this._objectsRolesPermission[object_name] = {}
}
this._objectsRolesPermission[object_name][objectRolePermission.name] = new SteedosObjectPermissionType(object_name, objectRolePermission)
}
getObjectRolesPermission(object_name: string) {
return this._objectsRolesPermission[object_name]
}
setObjectSpacePermission(object_name: string, spaceId: string, objectRolePermission: SteedosObjectPermissionTypeConfig) {
let objectPermissions = this._objectsSpaceRolesPermission[object_name]
if (!objectPermissions) {
this._objectsSpaceRolesPermission[object_name] = {}
}
let objectSpacePermissions = this._objectsSpaceRolesPermission[object_name][spaceId]
if (!objectSpacePermissions) {
this._objectsSpaceRolesPermission[object_name][spaceId] = {}
}
this._objectsSpaceRolesPermission[object_name][spaceId][objectRolePermission.name] = new SteedosObjectPermissionType(object_name, objectRolePermission)
}
getObjectSpaceRolesPermission(object_name: string, spaceId: string) {
if(this._objectsSpaceRolesPermission[object_name]){
return this._objectsSpaceRolesPermission[object_name][spaceId]
}
}
removeObjectSpacePermission(object_name: string, spaceId: string, objectRolePermissionName: string){
if(this._objectsSpaceRolesPermission[object_name] && this._objectsSpaceRolesPermission[object_name][spaceId]){
delete this._objectsSpaceRolesPermission[object_name][spaceId][objectRolePermissionName];
}
}
async getRoles(userId: SteedosIDType) {
if (this._getRoles) {
return await this._getRoles(userId)
} else {
return ['admin']
}
}
async find(tableName: string, query: SteedosQueryOptions, userId?: SteedosIDType) {
return await this._adapter.find(tableName, query, userId)
}
async findOne(tableName: string, id: SteedosIDType, query: SteedosQueryOptions, userId?: SteedosIDType) {
return await this._adapter.findOne(tableName, id, query, userId)
}
async insert(tableName: string, doc: Dictionary<any>, userId?: SteedosIDType) {
return await this._adapter.insert(tableName, doc, userId)
}
async update(tableName: string, id: SteedosIDType | SteedosQueryOptions, doc: Dictionary<any>, userId?: SteedosIDType) {
return await this._adapter.update(tableName, id, doc, userId)
}
async updateOne(tableName: string, id: SteedosIDType | SteedosQueryOptions, doc: Dictionary<any>, userId?: SteedosIDType) {
return await this._adapter.updateOne(tableName, id, doc, userId)
}
async updateMany(tableName: string, queryFilters: SteedosQueryFilters, doc: Dictionary<any>, userId?: SteedosIDType) {
return await this._adapter.updateMany(tableName, queryFilters, doc, userId)
}
async delete(tableName: string, id: SteedosIDType | SteedosQueryOptions, userId?: SteedosIDType) {
return await this._adapter.delete(tableName, id, userId)
}
async count(tableName: string, query: SteedosQueryOptions, userId?: SteedosIDType) {
return await this._adapter.count(tableName, query, userId)
}
async directInsert(tableName: string, doc: Dictionary<any>, userId?: SteedosIDType) {
return await this._adapter.directInsert(tableName, doc, userId)
}
async directUpdate(tableName: string, id: SteedosIDType | SteedosQueryOptions, doc: Dictionary<any>, userId?: SteedosIDType) {
return await this._adapter.directUpdate(tableName, id, doc, userId)
}
async directDelete(tableName: string, id: SteedosIDType | SteedosQueryOptions, userId?: SteedosIDType) {
return await this._adapter.directDelete(tableName, id, userId)
}
public get schema(): SteedosSchema {
return this._schema;
}
buildGraphQLSchema() {
this._graphQLSchema = buildGraphQLSchema(this._schema, this);
return this._graphQLSchema;
}
getGraphQLSchema() {
if (this._graphQLSchema) {
return this._graphQLSchema;
}
return buildGraphQLSchema(this._schema, this);
}
async dropEntities() {
if (this._adapter.dropEntities) {
return await this._adapter.dropEntities();
}
}
registerEntities() {
if (this._adapter.registerEntities) {
return this._adapter.registerEntities(this._objects);
}
}
async dropTables() {
if (this._adapter.dropEntities) {
return await this._adapter.dropEntities();
}
}
async createTables() {
if (this._adapter.createTables) {
return await this._adapter.createTables(this._objects);
}
}
init() {
this.initObjects();
this.initTypeORM();
// this.schema.transformReferenceOfObject(this);
}
initTypeORM() {
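        // Run the adapter's async init inside a Fiber and block until it settles,
        // so initialization behaves synchronously for callers.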
if (this._adapter.init) {
let self = this;
Fiber(function(){
let fiber = Fiber.current;
self._adapter.init(self._objects).then(result => {
fiber.run();
}).catch(result => {
console.error(result)
fiber.run();
})
Fiber.yield();
}).run();
}
}
    // Temporarily kept for compatibility with the creator bootstrap interface
getAppsConfig() {
return getAppConfigs()
}
getDashboardsConfig() {
return getDashboardConfigs()
}
async connect() {
this.initObjects();
        // connect the underlying adapter
if (this._adapter.connect)
await this._adapter.connect()
// init typeorm
if (this._adapter.init)
await this._adapter.init(this._objects)
}
async close() {
if (this._adapter.close)
this._adapter.close()
}
}
export function getDataSource(datasourceName: string, schema?: SteedosSchema) {
return (schema ? schema : getSteedosSchema()).getDataSource(datasourceName); |
} |
|
deepset.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import cast, List, Literal, Optional
import numpy as np
import numpy.typing as npt
import pytorch_lightning as pl
import torch
from lightkit.data import DataLoader
from torch import nn
from torch.utils.data import TensorDataset
from tsbench.config import Config, EnsembleConfig
from tsbench.evaluations.tracking import EnsembleTracker
from tsbench.surrogate.torch.deepset import DeepSetModel
from ._base import OutputNormalization, Surrogate
from ._factory import register_ensemble_surrogate
from .torch import DeepSetLightningModule, ListMLELoss
from .transformers import EnsembleConfigTransformer
@register_ensemble_surrogate("deepset")
class DeepSetSurrogate(Surrogate[EnsembleConfig]):
"""
The DeepSet surrogate is similar to the MLP surrogate but makes predictions for ensembles
rather than single models. Currently, it does not support the use of dataset features.
"""
trainer_: pl.Trainer
models_: List[nn.Module]
def __init__(
self,
tracker: EnsembleTracker,
objective: Literal["regression", "ranking"] = "ranking",
discount: Optional[
Literal["logarithmic", "linear", "quadratic"]
] = "linear",
hidden_layer_sizes: Optional[List[int]] = None,
weight_decay: float = 0.01,
dropout: float = 0.0,
predict: Optional[List[str]] = None,
output_normalization: OutputNormalization = None,
impute_simulatable: bool = False,
):
"""
Args:
tracker: A tracker that can be used to impute latency and number of model parameters
into model performances. Also, it is required for some input features.
objective: The optimization objective for the XGBoost estimators.
discount: The discount to apply for the ranking loss. If provided, it focuses on
correctly predicting the top values.
hidden_layer_sizes: The dimensions of the hidden layers. Defaults to two hidden layers
of size 32.
weight_decay: The weight decay to apply during optimization.
dropout: The dropout probability of dropout layers applied after every activation
function.
predict: The metrics to predict. All if not provided.
output_normalization: The type of normalization to apply to the features of each
dataset independently. `None` applies no normalization, "quantile" applies quantile
normalization, and "standard" transforms data to have zero mean and unit variance.
impute_simulatable: Whether the tracker should impute latency and number of model
parameters into the returned performance object.
"""
super().__init__(
tracker, predict, output_normalization, impute_simulatable
)
self.use_ranking = objective == "ranking"
self.hidden_layer_sizes = hidden_layer_sizes or [32, 32]
self.weight_decay = weight_decay
self.dropout = dropout
self.config_transformer = EnsembleConfigTransformer()
if objective == "regression":
self.loss = nn.MSELoss()
elif objective == "ranking":
self.loss = ListMLELoss(discount=discount)
@property
def required_cpus(self) -> int:
return 4
def _fit(
self, X: List[Config[EnsembleConfig]], y: npt.NDArray[np.float32]
) -> None:
# Fit transformers to infer dimensionality
X_numpy_list = self.config_transformer.fit_transform(X)
X_numpy = np.concatenate(X_numpy_list)
X_lengths_numpy = np.array([x.shape[0] for x in X_numpy_list])
input_dim = len(self.config_transformer.feature_names_)
output_dim = y.shape[1]
# For initializing data, we prepare group IDs for the datasets
mapping = {d: i for i, d in enumerate({x.dataset for x in X})}
# For each output variable, we need to train a separate model
self.models_ = []
for i in range(output_dim):
model = self._init_model(input_dim)
module = DeepSetLightningModule(
model, self.loss, self.weight_decay
)
# Train on output variable i
dataset = TensorDataset(
torch.from_numpy(X_numpy).float(),
torch.from_numpy(X_lengths_numpy).long(),
torch.from_numpy(y[:, i : i + 1]).float(),
torch.as_tensor(
[mapping[x.dataset] for x in X], dtype=torch.long
),
)
train_loader = DataLoader(dataset, batch_size=len(dataset))
self._trainer.fit(module, train_dataloaders=train_loader)
# Add to models
self.models_.append(model)
def _predict(
self, X: List[Config[EnsembleConfig]]
) -> npt.NDArray[np.float32]:
# Get data
X_numpy_list = self.config_transformer.transform(X) | dataset = TensorDataset(
torch.from_numpy(X_numpy).float(),
torch.from_numpy(X_lengths_numpy).long(),
)
test_loader = DataLoader(dataset, batch_size=len(dataset))
# Run prediction
predictions = []
for model in self.models_:
module = DeepSetLightningModule(model, self.loss)
out = cast(
List[torch.Tensor], self._trainer.predict(module, test_loader)
)
predictions.append(out[0].numpy())
return np.concatenate(predictions, axis=-1)
@property
def _trainer(self) -> pl.Trainer:
return pl.Trainer(
max_epochs=1000,
logger=False,
enable_checkpointing=False,
enable_model_summary=False,
enable_progress_bar=False,
gpus=int(torch.cuda.is_available()),
)
def _init_model(self, input_dim: int) -> nn.Module:
return DeepSetModel(
input_dim,
self.hidden_layer_sizes[-1],
1,
self.hidden_layer_sizes,
self.hidden_layer_sizes,
self.dropout,
) | X_numpy = np.concatenate(X_numpy_list)
X_lengths_numpy = np.array([x.shape[0] for x in X_numpy_list])
|
index.d.ts | export { default } from './SliderUnstyled';
export * from './SliderUnstyled';
export { default as SliderValueLabelUnstyled } from './SliderValueLabelUnstyled';
export * from './SliderValueLabelUnstyled'; |
export { default as sliderUnstyledClasses } from './sliderUnstyledClasses';
export * from './sliderUnstyledClasses'; | |
pageos.py | # -*- encoding: utf-8 -*-
"""
@File : pageos.py
@Time : 2020/2/28 7:49 PM
@Author : zhengjiani
@Email : [email protected]
@Software: PyCharm
"""
from flask import jsonify, Blueprint, Response, request
from . import api
from ..code import ResponseCode
from ..response import ResMsg
from ..util import route
@route(api,'/polists',methods=['GET'])
def get_po_files():
res = ResMsg()
res_dict = {
"totalpage": 5,
"pagenum": 4,
"total": 8,
"pos": [
{'poname':'PetClinic_page', | {'poname':'pageKit_page',
'file_path':'/Users/zhengjiani/PycharmProjects/PageOs_v0.1/bokchoy_pages/pageKit/po/pageKit_po_page.py',
'graph_path':'http://localhost:5000/static/pageKit.png'
},
{'poname':'phoneix_page',
'file_path':'/Users/zhengjiani/PycharmProjects/PageOs_v0.1/bokchoy_pages/phoenix/phoenix_page.py',
'graph_path':'http://localhost:5000/static/phonenix.png'
}
]
}
res.update(code=ResponseCode.SUCCESS,data=res_dict)
return res.data | 'file_path':'/Users/zhengjiani/PycharmProjects/PageOs_v0.1/bokchoy_pages/pet_page.py',
'graph_path':'http://localhost:5000/static/PetClinic.png'
}, |
wsgi.py | """
WSGI config for dan3103_1_1 project. | For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dan3103_1_1.settings')
application = get_wsgi_application() |
It exposes the WSGI callable as a module-level variable named ``application``.
|
Alert.d.ts | import * as React from "react";
export default class | extends React.Component<any, any> {
state: any;
static defaultProps: any;
colorArr: any;
constructor(props: any);
componentWillReceiveProps(props: any): void;
render(): JSX.Element;
getMessageElement(): JSX.Element;
getColor(color: string): string;
componentWillUnmount(): void;
tmm(e: any): void;
}
| Alert |
name_test.go | package astrid
import (
"testing"
"golang.org/x/text/language"
)
func | (t *testing.T) {
tags := []language.Tag{
language.AmericanEnglish,
language.CanadianFrench,
language.Dutch,
language.French,
language.German,
language.Italian,
language.Japanese,
language.Korean,
language.LatinAmericanSpanish,
language.Russian,
language.Spanish,
language.SimplifiedChinese,
language.TraditionalChinese}
for _, tag := range tags {
name, ok := name[tag]
s := tag.String()
if !ok {
t.Fatalf("name[%s] != true", s)
}
if ok := len(name) > 0; !ok {
t.Skipf("len(name[%s]) == 0", s)
}
}
}
| TestName |
location_pull.go | // Copyright 2019 The Kanister Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kando
import (
"context"
"io"
"os"
"github.com/pkg/errors"
"github.com/spf13/cobra"
crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1"
"github.com/kanisterio/kanister/pkg/kopia"
"github.com/kanisterio/kanister/pkg/location"
"github.com/kanisterio/kanister/pkg/param"
)
const (
kopiaSnapshotFlagName = "kopia-snapshot"
)
func newLocationPullCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "pull <target>",
Short: "Pull from s3-compliant object storage to a file or stdout",
Args: cobra.ExactArgs(1),
// TODO: Example invocations
RunE: func(c *cobra.Command, args []string) error {
return runLocationPull(c, args)
},
}
cmd.Flags().StringP(kopiaSnapshotFlagName, "k", "", "Pass the kopia snapshot information from the location push command (optional)")
return cmd
}
func kopiaSnapshotFlag(cmd *cobra.Command) string {
return cmd.Flag(kopiaSnapshotFlagName).Value.String()
}
func runLocationPull(cmd *cobra.Command, args []string) error {
target, err := targetWriter(args[0])
if err != nil {
return err
}
p, err := unmarshalProfileFlag(cmd)
if err != nil {
return err
}
s := pathFlag(cmd)
ctx := context.Background()
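	// Kopia-backed profiles need the snapshot ID from the push command and a
	// connection to the kopia API server; other location types are read directly.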
if p.Location.Type == crv1alpha1.LocationTypeKopia {
snapJSON := kopiaSnapshotFlag(cmd)
if snapJSON == "" {
return errors.New("kopia snapshot information is required to pull data using kopia")
}
kopiaSnap, err := kopia.UnmarshalKopiaSnapshot(snapJSON)
if err != nil {
return err
}
if err = connectToKopiaServer(ctx, p); err != nil {
return err
}
return kopiaLocationPull(ctx, kopiaSnap.ID, s, target)
}
return locationPull(ctx, p, s, target)
}
func targetWriter(target string) (io.Writer, error) {
if target != usePipeParam {
return os.OpenFile(target, os.O_RDWR|os.O_CREATE, 0755)
}
return os.Stdout, nil
}
func locationPull(ctx context.Context, p *param.Profile, path string, target io.Writer) error {
return location.Read(ctx, target, *p, path)
}
// kopiaLocationPull pulls the data from a kopia snapshot into the given target
func | (ctx context.Context, backupID, path string, target io.Writer) error {
return kopia.Read(ctx, backupID, path, target)
}
// connectToKopiaServer connects to the kopia server with given creds
func connectToKopiaServer(ctx context.Context, kp *param.Profile) error {
contentCacheSize := kopia.GetDataStoreGeneralContentCacheSize(kp.Credential.KopiaServerSecret.ConnectOptions)
metadataCacheSize := kopia.GetDataStoreGeneralMetadataCacheSize(kp.Credential.KopiaServerSecret.ConnectOptions)
return kopia.ConnectToAPIServer(
ctx,
kp.Credential.KopiaServerSecret.Cert,
kp.Credential.KopiaServerSecret.Password,
kp.Credential.KopiaServerSecret.Hostname,
kp.Location.Endpoint,
kp.Credential.KopiaServerSecret.Username,
contentCacheSize,
metadataCacheSize,
)
}
| kopiaLocationPull |
SignUpSelection.js | import React, { useState, Fragment } from 'react';
import { PropTypes } from 'prop-types';
import Login from '../../Login';
import SignUp from '../../SignUp';
import Button from '../../Button';
import Title from '../../Title';
import { isDesktop } from '../../../utils';
import { StyledSignUpSelection, StyledSignUpSelectionItem } from './styles';
function SignUpSelection({ backFn, moveToResume }) {
const [showedItem, setshowedItem] = useState('login');
const handleChangeForm = step => {
setshowedItem(step);
};
const isDesktopDevice = isDesktop();
return (
<Fragment>
<Button width={isDesktopDevice ? '10%' : '30%'} value={'< Volver'} onClick={backFn} /> | <Title value={'Bienvenido, logueate'} />
<Login
showForm={showedItem === 'login'}
backFn={() => handleChangeForm('login')}
moveToResume={moveToResume}
/>
</StyledSignUpSelectionItem>
<StyledSignUpSelectionItem>
<Title value={'Soy Nuevo'} />
<SignUp showForm={showedItem === 'signup'} backFn={() => handleChangeForm('signup')} />
</StyledSignUpSelectionItem>
</StyledSignUpSelection>
</Fragment>
);
}
SignUpSelection.propTypes = {
backFn: PropTypes.func,
moveToResume: PropTypes.func,
};
export default SignUpSelection; | <StyledSignUpSelection>
<StyledSignUpSelectionItem> |
search-request.ts | import { RailCardModel } from './railcard.model';
export class | {
locfrom: any;
locto: any;
PathConstraintType:string;
PathConstraintLocation:any;
DepartureLocationName: String;
ArrivalLocationName: String;
openreturn: string;
isseasonticket: string;
oneway: string;
outwarddepartafter: string;
datetimedepart: string;
showservices: string;
enquiryMethod: string;
firstclass: string;
directServicesOnly: string;
standardClass: string;
returndepartafter: string;
datetimereturn: string;
passengergroup: RailCardModel[];
adults:number;
Children :number;
Traveltype:string;
DepartureTimesStart: string;
//RailCardList: RailCardModel[];
railcardNames:string;
ReturnTimesStart: string;
IsReturnRequest:boolean;
TraveltypeReturn: string;
TravelSolutionDirection: string;
sessionid : string;
railcardscount:any;
railcardscountReturn:any;
railcardscountOpenreturn:any;
}
| SearchRequestModel |
test_pubsub.py | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
import dcm.agent.events.callback as events
import dcm.agent.events.pubsub as pubsub
import dcm.agent.tests.utils.general as test_utils
class TestPubSub(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self._event_space = events.EventSpace()
self._pub_sub = pubsub.PubSubEvent(self._event_space)
def test_simple_publish(self):
topic = str(uuid.uuid4())
x_val = 1
y_val = []
apple_val = "sauce"
def test_callback(x_param, y_param, apple_param=None):
self.assertEqual(x_param, x_val)
self.assertEqual(y_param, y_val)
self.assertEqual(apple_param, apple_val)
y_val.append("called")
self._pub_sub.subscribe(topic, test_callback)
self._pub_sub.publish(topic,
topic_args=(x_val, y_val),
topic_kwargs={'apple_param': apple_val})
self._event_space.poll(timeblock=0.0)
self.assertEqual(len(y_val), 1)
def test_multiple_subscribe(self):
topic = str(uuid.uuid4())
x_val = []
def | (x_param):
x_param.append(1)
def test_callback2(x_param):
x_param.append(2)
def test_callback3(x_param):
x_param.append(3)
self._pub_sub.subscribe(topic, test_callback1)
self._pub_sub.subscribe(topic, test_callback2)
self._pub_sub.subscribe(topic, test_callback3)
self._pub_sub.publish(topic, topic_args=(x_val,))
self._event_space.poll(timeblock=0.0)
self.assertEqual(len(x_val), 3)
self.assertIn(1, x_val)
self.assertIn(2, x_val)
self.assertIn(3, x_val)
def test_public_empty(self):
topic = str(uuid.uuid4())
self._pub_sub.publish(topic)
self._event_space.poll(timeblock=0.0)
def test_unsubscribe(self):
topic = str(uuid.uuid4())
def test_callback():
pass
self._pub_sub.subscribe(topic, test_callback)
self._pub_sub.unsubscribe(topic, test_callback)
try:
self._pub_sub.unsubscribe(topic, test_callback)
passes = False
except KeyError:
passes = True
self.assertTrue(passes)
def test_done_callback(self):
topic = str(uuid.uuid4())
x_val = []
def test_callback1(x_param):
x_param.append(1)
def test_callback2(x_param):
x_param.append(2)
def test_callback3(x_param):
x_param.append(3)
def done_cb(topic_error, x_param=None):
self.assertEqual(len(x_param), 3)
self.assertIn(1, x_param)
self.assertIn(2, x_param)
self.assertIn(3, x_param)
self.assertIsNone(topic_error)
x_param.append("done")
self._pub_sub.subscribe(topic, test_callback1)
self._pub_sub.subscribe(topic, test_callback2)
self._pub_sub.subscribe(topic, test_callback3)
self._pub_sub.publish(topic,
topic_args=(x_val,),
done_cb=done_cb,
done_kwargs={'x_param': x_val})
self._event_space.poll(timeblock=0.0)
self.assertIn('done', x_val)
def test_done_error_callback(self):
topic = str(uuid.uuid4())
x_val = []
def test_callback1(x_param):
x_param.append(1)
def test_callback2(x_param):
raise Exception("error")
def test_callback3(x_param):
x_param.append(3)
def done_cb(topic_error, x_param=None):
self.assertLess(len(x_param), 3)
self.assertIsNotNone(topic_error)
x_param.append("done")
self._pub_sub.subscribe(topic, test_callback1)
self._pub_sub.subscribe(topic, test_callback2)
self._pub_sub.subscribe(topic, test_callback3)
self._pub_sub.publish(topic,
topic_args=(x_val,),
done_cb=done_cb,
done_kwargs={'x_param': x_val})
self._event_space.poll(timeblock=0.0)
self.assertIn('done', x_val)
| test_callback1 |
dateshift.py | """
The basic date shifting rule.
Original Issue: DC-1005
This is an abstract class and cannot be directly instantiated. It must be
extended to be used.
"""
# Python Imports
import logging
from abc import abstractmethod
# Project imports
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from common import JINJA_ENV
LOGGER = logging.getLogger(__name__)
SHIFT_EXP = JINJA_ENV.from_string("""
{{field_type}}_SUB( CAST({{field}} AS {{field_type}}), INTERVAL (
SELECT
shift
FROM
`{{project}}.{{mapping_dataset_id}}.{{mapping_table_id}}` AS map
WHERE
map.research_id = remodel.person_id) DAY) AS {{field}}
""")
SELECT_STATEMENT = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{dataset}}.{{table}}` AS (
SELECT
{{fields}}
FROM `{{project}}.{{dataset}}.{{table}}` AS remodel)
""")
class DateShiftRule(BaseCleaningRule):
"""
Date shift fields from 1 - 365 days in the past.
Performs a "day" shift for any field in the provided table names
and schemas. Uses the field type to determine the shift function to
use. Currently works for the DATE, DATETIME, and TIMESTAMP type fields.
"""
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
issue_numbers,
description,
affected_datasets,
affected_tables,
mapping_dataset_id,
mapping_table_id,
depends_on=None):
"""
Initialize the class.
Set the issue numbers, description and affected datasets. As other
tickets may affect this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
if depends_on is None:
depends_on = []
desc = (f'Date shift date and timestamp fields by the date shift '
f'calculated in the static mapping table.')
self.mapping_dataset_id = mapping_dataset_id
self.mapping_table_id = mapping_table_id
super().__init__(issue_numbers=issue_numbers,
description=description,
affected_datasets=affected_datasets,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
affected_tables=affected_tables,
depends_on=depends_on)
@abstractmethod
def | (self):
"""
Provide dictionary of table names and schemas.
:returns: a dictionary whose key, value patterns are in the
form of {"tablename": "json schema",}.
"""
pass
def get_query_specs(self):
"""
Return a list of dictionary query specifications.
:return: A list of dictionaries. Each dictionary contains a
single query and a specification for how to execute that query.
The specifications are optional but the query is required.
"""
date_shift_queries = []
for table, schema in self.get_tables_and_schemas().items():
LOGGER.info(f"Building Date Shifting query for {self.dataset_id}."
f"{table}")
fields = []
for field in schema:
field_type = field.get('type').lower()
field_name = field.get('name')
if field_type in ['date', 'datetime', 'timestamp']:
shift_string = SHIFT_EXP.render(
project=self.project_id,
mapping_dataset_id=self.mapping_dataset_id,
mapping_table_id=self.mapping_table_id,
field_type=field_type.upper(),
field=field_name,
table=table)
fields.append(shift_string)
else:
fields.append(field_name)
fields_string = ',\n'.join(fields)
query = SELECT_STATEMENT.render(project=self.project_id,
dataset=self.dataset_id,
table=table,
fields=fields_string)
date_shift_queries.append({'query': query})
return date_shift_queries
| get_tables_and_schemas |
register.go | package service
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"gitlab.com/gitlab-org/gitaly/client"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/config"
gitalyhook "gitlab.com/gitlab-org/gitaly/internal/gitaly/hook"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/rubyserver"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/blob"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/cleanup"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/commit"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/conflicts"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/diff"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/hook"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/internalgitaly"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/namespace"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/objectpool"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/operations"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/ref"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/remote"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/repository"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/server"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/smarthttp"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/ssh"
"gitlab.com/gitlab-org/gitaly/internal/gitaly/service/wiki"
"gitlab.com/gitlab-org/gitaly/internal/storage"
"gitlab.com/gitlab-org/gitaly/proto/go/gitalypb"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)
var (
once sync.Once
smarthttpPackfileNegotiationMetrics *prometheus.CounterVec
sshPackfileNegotiationMetrics *prometheus.CounterVec
)
func registerMetrics(cfg config.Cfg) {
smarthttpPackfileNegotiationMetrics = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "gitaly",
Subsystem: "smarthttp",
Name: "packfile_negotiation_requests_total",
Help: "Total number of features used for packfile negotiations",
},
[]string{"git_negotiation_feature"},
)
sshPackfileNegotiationMetrics = promauto.NewCounterVec(
prometheus.CounterOpts{
Namespace: "gitaly",
Subsystem: "ssh",
Name: "packfile_negotiation_requests_total",
Help: "Total number of features used for packfile negotiations",
},
[]string{"git_negotiation_feature"},
)
}
// RegisterAll will register all the known grpc services with
// the specified grpc service instance
func RegisterAll(grpcServer *grpc.Server, cfg config.Cfg, rubyServer *rubyserver.Server, hookManager gitalyhook.Manager, locator storage.Locator, conns *client.Pool) | {
once.Do(func() {
registerMetrics(cfg)
})
gitalypb.RegisterBlobServiceServer(grpcServer, blob.NewServer(rubyServer, locator))
gitalypb.RegisterCleanupServiceServer(grpcServer, cleanup.NewServer(locator))
gitalypb.RegisterCommitServiceServer(grpcServer, commit.NewServer(cfg, locator))
gitalypb.RegisterDiffServiceServer(grpcServer, diff.NewServer(locator))
gitalypb.RegisterNamespaceServiceServer(grpcServer, namespace.NewServer(locator))
gitalypb.RegisterOperationServiceServer(grpcServer, operations.NewServer(cfg, rubyServer, hookManager, locator, conns))
gitalypb.RegisterRefServiceServer(grpcServer, ref.NewServer(locator))
gitalypb.RegisterRepositoryServiceServer(grpcServer, repository.NewServer(cfg, rubyServer, locator))
gitalypb.RegisterSSHServiceServer(grpcServer, ssh.NewServer(
locator,
ssh.WithPackfileNegotiationMetrics(sshPackfileNegotiationMetrics),
))
gitalypb.RegisterSmartHTTPServiceServer(grpcServer, smarthttp.NewServer(
locator,
smarthttp.WithPackfileNegotiationMetrics(smarthttpPackfileNegotiationMetrics),
))
gitalypb.RegisterWikiServiceServer(grpcServer, wiki.NewServer(rubyServer, locator))
gitalypb.RegisterConflictsServiceServer(grpcServer, conflicts.NewServer(rubyServer, cfg, locator))
gitalypb.RegisterRemoteServiceServer(grpcServer, remote.NewServer(rubyServer, locator))
gitalypb.RegisterServerServiceServer(grpcServer, server.NewServer(cfg.Storages))
gitalypb.RegisterObjectPoolServiceServer(grpcServer, objectpool.NewServer(cfg, locator))
gitalypb.RegisterHookServiceServer(grpcServer, hook.NewServer(cfg, hookManager))
gitalypb.RegisterInternalGitalyServer(grpcServer, internalgitaly.NewServer(cfg.Storages))
healthpb.RegisterHealthServer(grpcServer, health.NewServer())
} |
|
schema.go | /*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
GraphQL Schema setup file.
*/
//go:generate go-bindata -modtime=0 -ignore=\.go -ignore=\.ts -ignore=\.sh -ignore=\.bazel -pkg=unauthenticatedschema -o=bindata.gen.go ./...
package unauthenticatedschema
import "bytes"
// MustLoadSchema reads all the bindata .graphql schema files and concats them together into
// one string.
func | () string {
buf := bytes.Buffer{}
for _, name := range AssetNames() {
b := MustAsset(name)
buf.Write(b)
// Add a newline if the file does not end in a newline.
if len(b) > 0 && b[len(b)-1] != '\n' {
buf.WriteByte('\n')
}
}
return buf.String()
}
| MustLoadSchema |
service_test.go | package kube
import (
"context"
"testing"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/pkg/errors"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/helm/pkg/helm"
"k8s.io/helm/pkg/proto/hapi/chart"
"k8s.io/helm/pkg/proto/hapi/release"
"k8s.io/helm/pkg/proto/hapi/services"
"k8s.io/helm/pkg/timeconv"
"github.com/supergiant/control/pkg/model"
"github.com/supergiant/control/pkg/runner/ssh"
"github.com/supergiant/control/pkg/sgerrors"
"github.com/supergiant/control/pkg/sghelm/proxy"
"github.com/supergiant/control/pkg/testutils"
"github.com/supergiant/control/pkg/testutils/storage"
)
var (
fakeRls = &release.Release{
Name: "fakeRelease",
Info: &release.Info{
			FirstDeployed: &timestamp.Timestamp{},
			LastDeployed:  &timestamp.Timestamp{},
Status: &release.Status{
Code: release.Status_UNKNOWN,
},
},
Chart: &chart.Chart{
Metadata: &chart.Metadata{},
},
}
)
type fakeChartGetter struct {
chrt *chart.Chart
err error
}
func (f fakeChartGetter) GetChart(ctx context.Context, repoName, chartName, chartVersion string) (*chart.Chart, error) {
return f.chrt, f.err
}
type fakeHelmProxy struct {
proxy.Interface
err error
installRlsResp *services.InstallReleaseResponse
getReleaseResp *services.GetReleaseContentResponse
listReleaseResp *services.ListReleasesResponse
uninstReleaseResp *services.UninstallReleaseResponse
}
func (p *fakeHelmProxy) InstallReleaseFromChart(chart *chart.Chart, namespace string, opts ...helm.InstallOption) (*services.InstallReleaseResponse, error) {
return p.installRlsResp, p.err
}
func (p *fakeHelmProxy) ListReleases(opts ...helm.ReleaseListOption) (*services.ListReleasesResponse, error) {
return p.listReleaseResp, p.err
}
func (p *fakeHelmProxy) ReleaseContent(rlsName string, opts ...helm.ContentOption) (*services.GetReleaseContentResponse, error) {
return p.getReleaseResp, p.err
}
func (p *fakeHelmProxy) DeleteRelease(rlsName string, opts ...helm.DeleteOption) (*services.UninstallReleaseResponse, error) {
return p.uninstReleaseResp, p.err
}
type mockServerResourceGetter struct {
resources []*metav1.APIResourceList
err error
}
func (m *mockServerResourceGetter) ServerResources() ([]*metav1.APIResourceList, error) {
return m.resources, m.err
}
func TestKubeServiceGet(t *testing.T) {
testCases := []struct {
expectedName string
data []byte
err error
}{
{
expectedName: "kube-name-1234",
data: []byte(`{"name":"kube-name-1234"}`),
err: nil,
},
{
data: nil,
err: errors.New("test err"),
},
}
prefix := DefaultStoragePrefix
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("Get", context.Background(), prefix, "fake_id").
Return(testCase.data, testCase.err)
service := NewService(prefix, m, nil)
kube, err := service.Get(context.Background(), "fake_id")
if testCase.err != errors.Cause(err) {
t.Errorf("Wrong error expected %v actual %v", testCase.err, err)
return
}
if testCase.err == nil && kube.Name != testCase.expectedName {
t.Errorf("Wrong kube name expected %s actual %s", testCase.expectedName, kube.Name)
}
}
}
func TestKubeServiceCreate(t *testing.T) {
testCases := []struct {
kube *model.Kube
err error
}{
{
kube: &model.Kube{},
err: nil,
},
{
kube: &model.Kube{},
err: errors.New("test err"),
},
}
prefix := DefaultStoragePrefix
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("Put",
context.Background(),
prefix,
mock.Anything,
mock.Anything).
Return(testCase.err)
service := NewService(prefix, m, nil)
err := service.Create(context.Background(), testCase.kube)
if testCase.err != errors.Cause(err) {
t.Errorf("Unexpected error when create node %v", err)
}
}
}
func TestKubeServiceGetAll(t *testing.T) {
testCases := []struct {
data [][]byte
err error
}{
{
data: [][]byte{[]byte(`{"id":"kube-name-1234"}`), []byte(`{"id":"56kube-name-5678"}`)},
err: nil,
},
{
data: nil,
err: errors.New("test err"),
},
}
prefix := DefaultStoragePrefix
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("GetAll", context.Background(), prefix).Return(testCase.data, testCase.err)
service := NewService(prefix, m, nil)
kubes, err := service.ListAll(context.Background())
if testCase.err != errors.Cause(err) {
t.Errorf("Wrong error expected %v actual %v", testCase.err, err)
return
}
if testCase.err == nil && len(kubes) != 2 {
t.Errorf("Wrong len of kubes expected 2 actual %d", len(kubes))
}
}
}
func TestService_InstallRelease(t *testing.T) {
tcs := []struct {
svc Service
clusterID string
rlsInput *ReleaseInput
expectedRes *release.Release
expectedErr error
}{
{ // TC#1
expectedErr: sgerrors.ErrNilEntity,
},
{ // TC#2
rlsInput: &ReleaseInput{
Name: "fake",
},
svc: Service{
chrtGetter: fakeChartGetter{
err: errFake,
},
},
expectedErr: errFake,
},
{ // TC#3
rlsInput: &ReleaseInput{
Name: "fake",
},
svc: Service{
chrtGetter: &fakeChartGetter{},
storage: &storage.Fake{
GetErr: errFake,
},
},
expectedErr: errFake,
},
{ // TC#4
rlsInput: &ReleaseInput{
Name: "fake",
},
svc: Service{
chrtGetter: &fakeChartGetter{},
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return nil, errFake
},
},
expectedErr: errFake,
},
{ // TC#5
rlsInput: &ReleaseInput{
Name: "fake",
},
svc: Service{
chrtGetter: &fakeChartGetter{},
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
err: errFake,
}, nil
},
},
expectedErr: errFake,
},
{ // TC#6
rlsInput: &ReleaseInput{
Name: "fake",
},
svc: Service{
chrtGetter: &fakeChartGetter{},
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
installRlsResp: &services.InstallReleaseResponse{
Release: fakeRls,
},
}, nil
},
},
expectedRes: fakeRls,
},
}
for i, tc := range tcs {
rls, err := tc.svc.InstallRelease(context.Background(), tc.clusterID, tc.rlsInput)
require.Equalf(t, tc.expectedErr, errors.Cause(err), "TC#%d: check errors", i+1)
if err == nil {
require.Equalf(t, tc.expectedRes, rls, "TC#%d: check results", i+1)
}
}
}
func TestService_ReleaseDetails(t *testing.T) {
tcs := []struct {
svc Service
expectedRes *release.Release
expectedErr error
}{
{ // TC#1
svc: Service{
storage: &storage.Fake{
GetErr: errFake,
},
},
expectedErr: errFake,
},
{ // TC#2
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return nil, errFake
},
},
expectedErr: errFake,
},
{ // TC#3
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
err: errFake,
}, nil
},
},
expectedErr: errFake,
},
{ // TC#4
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
getReleaseResp: &services.GetReleaseContentResponse{
Release: fakeRls,
},
}, nil
},
},
expectedRes: fakeRls,
},
}
for i, tc := range tcs {
rls, err := tc.svc.ReleaseDetails(context.Background(), "testCluster", "")
require.Equalf(t, tc.expectedErr, errors.Cause(err), "TC#%d: check errors", i+1)
if err == nil {
require.Equalf(t, tc.expectedRes, rls, "TC#%d: check results", i+1)
}
}
}
func TestService_ListReleases(t *testing.T) {
tcs := []struct {
svc Service
expectedRes []*model.ReleaseInfo
expectedErr error
}{
{ // TC#1
svc: Service{
storage: &storage.Fake{
GetErr: errFake,
},
},
expectedErr: errFake,
},
{ // TC#2
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return nil, errFake
},
},
expectedErr: errFake,
},
{ // TC#3
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
err: errFake,
}, nil
},
},
expectedErr: errFake,
},
{ // TC#4
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
listReleaseResp: &services.ListReleasesResponse{
Releases: []*release.Release{fakeRls, nil},
},
}, nil
},
},
expectedRes: []*model.ReleaseInfo{
{
Name: fakeRls.GetName(),
Namespace: fakeRls.GetNamespace(),
Version: fakeRls.GetVersion(),
CreatedAt: timeconv.String(fakeRls.GetInfo().GetFirstDeployed()),
LastDeployed: timeconv.String(fakeRls.GetInfo().GetLastDeployed()),
Chart: fakeRls.GetChart().Metadata.Name,
ChartVersion: fakeRls.GetChart().Metadata.Version,
Status: fakeRls.GetInfo().Status.Code.String(),
},
},
},
}
for i, tc := range tcs {
rls, err := tc.svc.ListReleases(context.Background(), "testCluster", "", "", 0)
require.Equalf(t, tc.expectedErr, errors.Cause(err), "TC#%d: check errors", i+1)
if err == nil {
require.Equalf(t, tc.expectedRes, rls, "TC#%d: check results", i+1)
}
}
}
func TestService_DeleteRelease(t *testing.T) {
tcs := []struct {
svc Service
expectedRes *model.ReleaseInfo
expectedErr error
}{
{ // TC#1
svc: Service{
storage: &storage.Fake{
GetErr: errFake,
},
},
expectedErr: errFake,
},
{ // TC#2
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return nil, errFake
},
},
expectedErr: errFake,
},
{ // TC#3
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
err: errFake,
}, nil
},
},
expectedErr: errFake,
},
{ // TC#4
svc: Service{
storage: &storage.Fake{
Item: []byte("{}"),
},
newHelmProxyFn: func(kube *model.Kube) (proxy.Interface, error) {
return &fakeHelmProxy{
uninstReleaseResp: &services.UninstallReleaseResponse{
Release: fakeRls,
},
}, nil
},
},
expectedRes: &model.ReleaseInfo{
Name: fakeRls.GetName(),
Namespace: fakeRls.GetNamespace(),
Version: fakeRls.GetVersion(),
CreatedAt: timeconv.String(fakeRls.GetInfo().GetFirstDeployed()),
LastDeployed: timeconv.String(fakeRls.GetInfo().GetLastDeployed()),
Chart: fakeRls.GetChart().Metadata.Name,
ChartVersion: fakeRls.GetChart().Metadata.Version,
Status: fakeRls.GetInfo().Status.Code.String(),
},
},
}
for i, tc := range tcs {
rls, err := tc.svc.DeleteRelease(context.Background(), "testCluster", "", true)
require.Equalf(t, tc.expectedErr, errors.Cause(err), "TC#%d: check errors", i+1)
if err == nil {
require.Equalf(t, tc.expectedRes, rls, "TC#%d: check results", i+1)
}
}
}
func TestService_Delete(t *testing.T) {
testCases := []struct {
repoErr error
}{
{
sgerrors.ErrNotFound,
},
{
nil,
},
}
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("Delete", context.Background(), mock.Anything, mock.Anything).
Return(testCase.repoErr)
service := NewService("", m, nil)
err := service.Delete(context.Background(), "key")
if err != testCase.repoErr {
t.Errorf("expected error %v actual %v", testCase.repoErr, err)
}
}
}
func TestResourcesGroupInfo(t *testing.T) {
testCases := []struct {
discoveryErr error
resourceErr error
resourcesLists []*metav1.APIResourceList
expectedGroupCount int
expectedErr error
}{
{
discoveryErr: sgerrors.ErrNotFound,
expectedErr: sgerrors.ErrNotFound,
},
{
resourceErr: sgerrors.ErrNotFound,
expectedErr: sgerrors.ErrNotFound,
},
{
resourcesLists: []*metav1.APIResourceList{
{
GroupVersion: "",
APIResources: []metav1.APIResource{
{
Name: "name-1",
Kind: "kind1",
},
{
Name: "name-2",
Kind: "kind2",
},
},
},
{
GroupVersion: "/",
APIResources: []metav1.APIResource{
{
Name: "name-2",
Kind: "kind2",
},
},
},
},
expectedGroupCount: 2,
},
}
for _, testCase := range testCases {
m := &mockServerResourceGetter{
resources: testCase.resourcesLists,
err: testCase.resourceErr,
}
svc := Service{
discoveryClientFn: func(k *model.Kube) (ServerResourceGetter, error) {
return m, testCase.discoveryErr
},
}
groups, err := svc.resourcesGroupInfo(&model.Kube{})
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("expected error %v actual %v",
testCase.expectedErr, err)
}
if len(groups) != testCase.expectedGroupCount {
t.Errorf("expected group count %d actual %d",
testCase.expectedGroupCount, len(groups)) |
func TestListKubeResources(t *testing.T) {
testCases := []struct {
kubeData []byte
getkubeErr error
discoveryErr error
resourceErr error
resourcesLists []*metav1.APIResourceList
expectedGroupCount int
expectedErr error
}{
{
getkubeErr: sgerrors.ErrNotFound,
kubeData: nil,
expectedErr: sgerrors.ErrNotFound,
},
{
getkubeErr: nil,
kubeData: []byte(`{"name":"kube-name-1234"}`),
resourcesLists: []*metav1.APIResourceList{
{
GroupVersion: "",
APIResources: []metav1.APIResource{
{
Name: "name-1",
Kind: "kind1",
},
{
Name: "name-2",
Kind: "kind2",
},
},
},
{
GroupVersion: "/",
APIResources: []metav1.APIResource{
{
Name: "name-2",
Kind: "kind2",
},
},
},
},
},
{
getkubeErr: nil,
discoveryErr: sgerrors.ErrNotFound,
kubeData: []byte(`{"name":"kube-name-1234"}`),
expectedErr: sgerrors.ErrNotFound,
},
}
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("Get", context.Background(), mock.Anything, mock.Anything).
Return(testCase.kubeData, testCase.getkubeErr)
mockResourceGetter := &mockServerResourceGetter{
resources: testCase.resourcesLists,
err: testCase.resourceErr,
}
svc := Service{
storage: m,
discoveryClientFn: func(k *model.Kube) (ServerResourceGetter, error) {
return mockResourceGetter, testCase.discoveryErr
},
}
_, err := svc.ListKubeResources(context.Background(), "kube-name-1234")
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("expected error %v actual %v",
testCase.expectedErr, err)
}
}
}
func TestService_GetKubeResources(t *testing.T) {
testCases := []struct {
kubeData []byte
getkubeErr error
discoveryErr error
resourceErr error
resourceName string
resourcesLists []*metav1.APIResourceList
expectedGroupCount int
expectedErr error
}{
{
getkubeErr: sgerrors.ErrNotFound,
kubeData: nil,
expectedErr: sgerrors.ErrNotFound,
},
{
getkubeErr: nil,
kubeData: []byte(`{"name":"kube-name-1234"}`),
resourcesLists: []*metav1.APIResourceList{
{
GroupVersion: "",
APIResources: []metav1.APIResource{
{
Name: "name-1",
Kind: "kind1",
},
{
Name: "name-2",
Kind: "kind2",
},
},
},
{
GroupVersion: "/",
APIResources: []metav1.APIResource{
{
Name: "name-2",
Kind: "kind2",
},
},
},
},
resourceName: "name-3",
expectedErr: sgerrors.ErrNotFound,
},
{
getkubeErr: nil,
discoveryErr: sgerrors.ErrNotFound,
kubeData: []byte(`{"name":"kube-name-1234"}`),
expectedErr: sgerrors.ErrNotFound,
},
}
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("Get", context.Background(), mock.Anything, mock.Anything).
Return(testCase.kubeData, testCase.getkubeErr)
mockResourceGetter := &mockServerResourceGetter{
resources: testCase.resourcesLists,
err: testCase.resourceErr,
}
svc := Service{
storage: m,
discoveryClientFn: func(k *model.Kube) (ServerResourceGetter, error) {
return mockResourceGetter, testCase.discoveryErr
},
}
_, err := svc.GetKubeResources(context.Background(),
"kube-name-1234", testCase.resourceName,
"namaspace", testCase.resourceName)
if errors.Cause(err) != testCase.expectedErr {
t.Errorf("expected error %v actual %v",
testCase.expectedErr, err)
}
}
}
func TestService_KubeConfigFor(t *testing.T) {
testCases := []struct {
user string
kubeData []byte
getkubeErr error
expectedErr error
}{
{
expectedErr: sgerrors.ErrNotFound,
},
{
user: "user",
expectedErr: sgerrors.ErrNotFound,
},
{
user: KubernetesAdminUser,
getkubeErr: fakeErrFileNotFound,
expectedErr: fakeErrFileNotFound,
},
{
user: KubernetesAdminUser,
kubeData: []byte(`{"masters":{"m":{"publicIp":"1.2.3.4"}}}`),
},
}
for i, tc := range testCases {
m := new(testutils.MockStorage)
m.On("Get", context.Background(), mock.Anything, mock.Anything).
Return(tc.kubeData, tc.getkubeErr)
svc := Service{
storage: m,
}
data, err := svc.KubeConfigFor(context.Background(), "kname", tc.user)
require.Equal(t, tc.expectedErr, errors.Cause(err), "TC#%d", i+1)
if err == nil {
require.NotNilf(t, data, "TC#%d", i+1)
}
}
}
func TestService_GetCerts(t *testing.T) {
testCases := []struct {
kname string
cname string
data []byte
getErr error
sshErr error
expectedErr error
}{
{
data: nil,
getErr: sgerrors.ErrNotFound,
expectedErr: sgerrors.ErrNotFound,
},
{
kname: "kube-name-1234",
data: []byte(`{"name":"kube-name-1234", "sshUser": "root", "sshKey": ""}`),
sshErr: ssh.ErrHostNotSpecified,
expectedErr: ssh.ErrHostNotSpecified,
},
}
prefix := DefaultStoragePrefix
for _, testCase := range testCases {
m := new(testutils.MockStorage)
m.On("Get", context.Background(), prefix, mock.Anything).
Return(testCase.data, testCase.getErr)
service := NewService(prefix, m, nil)
_, err := service.GetCerts(context.Background(),
testCase.kname, testCase.cname)
if testCase.expectedErr != errors.Cause(err) {
t.Errorf("Wrong error expected %v actual %v", testCase.expectedErr, err)
return
}
}
} | }
}
} |
chiihou.ts | import { Yaku } from './../yaku'
export class | extends Yaku {
hanOpen = null
hanClose = null
isYakuman = true
name = '地和'
englishName = 'chiihou'
isConditionMet(): boolean {
return false
}
}
| Chiihou |
image-api.js | import React from "react"
import Img from "gatsby-image"
import { rhythm } from "../utils/typography"
export default props => {
const assets = props.data.allContentfulAsset.edges
return (
<div>
<h1>Image API examples</h1>
<p>
Gatsby offers rich integration with{` `}
<a href="https://www.contentful.com/developers/docs/references/images-api/">
Contentful's Image API
</a>
</p>
<p>
Open Graph<em>i</em>QL on your own site to experiment with the following
options
</p>
<h2>Resize</h2>
{assets.map(({ node: { title, resize } }) => (
<img
key={resize.src}
alt={title}
src={resize.src}
width={resize.width}
height={resize.height}
style={{
marginRight: rhythm(1 / 2),
marginBottom: rhythm(1 / 2),
border: `1px solid tomato`,
}}
/>
))}
<h4>GraphQL query</h4>
<pre style={{ background: `#efeded`, padding: rhythm(3 / 4) }}>
<code
dangerouslySetInnerHTML={{
__html: `{
allContentfulAsset {
edges {
node {
title
resize(width: 100) {
src
width
height
}
}
}
}
}`,
}}
/>
</pre>
<h2>Responsive Resolution</h2>
<p>
If you make queries with <code>resolutions</code> then Gatsby
automatically generates images with 1x, 1.5x, 2x, and 3x versions so
          your images look great on devices with any screen resolution.
</p>
<p>
If you're on a retina class screen, notice how much sharper these images
are than the above "resized" images.
</p>
<p>
You should prefer this operator over <code>resize</code>.
</p>
{assets.map(({ node: { title, resolutions } }) => (
<Img
key={resolutions.src}
alt={title}
resolutions={resolutions}
backgroundColor
style={{
marginRight: rhythm(1 / 2),
marginBottom: rhythm(1 / 2),
border: `1px solid tomato`,
}}
/>
))}
<h4>GraphQL query</h4>
<pre style={{ background: `#efeded`, padding: rhythm(3 / 4) }}>
<code
dangerouslySetInnerHTML={{
__html: `{
allContentfulAsset {
edges {
node {
title
resolutions(width: 100) {
width
height
src
srcSet
}
}
}
}
}`, |
<h2>Resizing</h2>
<p>
On both resize and resolutions you can also add a{` `}
<code>height</code>
{` `}
          argument to the GraphQL query to crop the image to a certain size.
</p>
<p>
You can also set the{` `}
<a href="https://www.contentful.com/developers/docs/references/images-api/#/reference/resizing-&-cropping/change-the-resizing-behavior">
resizing behavior
</a>
{` `}
and{` `}
<a href="https://www.contentful.com/developers/docs/references/images-api/#/reference/resizing-&-cropping/specify-focus-area-for-resizing">
resizing focus area
</a>
</p>
{assets.map(({ node: { title, resizing } }) => (
<Img
key={resizing.src}
alt={title}
resolutions={resizing}
style={{
marginRight: rhythm(1 / 2),
marginBottom: rhythm(1 / 2),
border: `1px solid tomato`,
}}
/>
))}
<h4>GraphQL query</h4>
<pre style={{ background: `#efeded`, padding: rhythm(3 / 4) }}>
<code
dangerouslySetInnerHTML={{
__html: `{
allContentfulAsset {
edges {
node {
title
resolutions(width: 100, height: 100) {
width
height
src
srcSet
}
}
}
}
}`,
}}
/>
</pre>
<h2>Responsive Sizes</h2>
<p>
This GraphQL option allows you to generate responsive images that
automatically respond to different device screen resolution and widths.
E.g. a smartphone browser will download a much smaller image than a
desktop device.
</p>
<p>
Instead of specifying a width and height, with sizes you specify a{` `}
<code>maxWidth</code>, the max width the container of the images
reaches.
</p>
{assets.map(({ node: { title, sizes } }) => (
<Img
key={sizes.src}
alt={title}
sizes={sizes}
style={{
marginRight: rhythm(1 / 2),
marginBottom: rhythm(1 / 2),
border: `1px solid tomato`,
}}
/>
))}
<h4>GraphQL query</h4>
<pre style={{ background: `#efeded`, padding: rhythm(3 / 4) }}>
<code
dangerouslySetInnerHTML={{
__html: `{
allContentfulAsset {
edges {
node {
title
sizes(maxWidth: 613) {
sizes
src
srcSet
}
}
}
}
}`,
}}
/>
</pre>
</div>
)
}
export const pageQuery = graphql`
query ImageAPIExamples {
allContentfulAsset(filter: { node_locale: { eq: "en-US" } }) {
edges {
node {
title
resize(width: 100) {
src
width
height
}
}
}
}
}
` | }}
/>
</pre> |
label.js | import { __extends } from "tslib";
import ApplyResponsiveLabel from '../../../util/responsive/apply/label';
import { get } from '@antv/util';
var ApplyResponsiveLineLabel = /** @class */ (function (_super) {
__extends(ApplyResponsiveLineLabel, _super);
function | () {
return _super !== null && _super.apply(this, arguments) || this;
}
ApplyResponsiveLineLabel.prototype.getType = function () {
return get(this.plot.options, ['label', 'type'], 'point');
};
return ApplyResponsiveLineLabel;
}(ApplyResponsiveLabel));
export default function responsivePointLabel(layer) {
var responsiveTheme = layer.getResponsiveTheme();
new ApplyResponsiveLineLabel({
plot: layer,
responsiveTheme: responsiveTheme,
});
}
//# sourceMappingURL=label.js.map | ApplyResponsiveLineLabel |
userLoginUtils.ts | import { Models } from 'itmat-commons';
import { db } from '../database/database';
export class UserLoginUtils {
constructor() {
this.serialiseUser = this.serialiseUser.bind(this);
this.deserialiseUser = this.deserialiseUser.bind(this);
}
public serialiseUser(user: Models.UserModels.IUser, done: (err: any, id?: any) => void): void {
done(null, user.username);
}
public async deserialiseUser(username: string, done: (err: any, id?: any) => void): Promise<void> {
const user: Models.UserModels.IUserWithoutToken = await this._getUser(username);
done(null, user);
}
private async _getUser(username: string): Promise<Models.UserModels.IUserWithoutToken> {
return await db.collections!.users_collection.findOne({ deleted: null, username }, { projection: { _id: 0, deleted: 0, password: 0 } })!;
} | }
export const userLoginUtils = Object.freeze(new UserLoginUtils()); |
|
last_error.rs | /*
* Copyright (C) 2018 Kubos Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use eps_api::{EpsError, EpsResult};
use rust_i2c::Command;
/// Last Error
///
/// If an error has been generated after attempting to execute a user’s command
/// the value 0xFFFF is returned. To find out the details of the last error,
/// send the command 0x03 followed by the data byte 0x00. This will return
/// the code of the last error generated. The first two bytes returned represent
/// the Motherboard’s error code, the second two bytes represent the Daughterboard’s.
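// Illustrative sketch (not part of the original file): sending `command()` below
// (cmd 0x03 with the data byte 0x00) and receiving the 4-byte reply
// [0x00, 0x02, 0x00, 0x04] parses to COMMAND_DATA_INCORRECT for the motherboard and
// CHANNEL_INACTIVE for the daughterboard, as exercised by the tests at the bottom.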
/// Bitflags struct holding last error information.
bitflags! {
#[derive(Default)]
pub struct ErrorCode: u8 {
/// CRC code does not match data
const BAD_CRC = 0x10;
/// Unknown command received
const UNKNOWN_COMMAND = 0x01;
/// Supplied data incorrect when processing command
const COMMAND_DATA_INCORRECT = 0x02;
/// Selected channel does not exist
const CHANNEL_DOES_NOT_EXIST = 0x03;
/// Selected channel is currently inactive
const CHANNEL_INACTIVE = 0x04;
/// A reset had to occur
const RESET_OCCURRED = 0x13;
/// There was an error with the ADC acquisition
const BAD_ADC_ACQUISITION = 0x14;
/// Reading from EEPROM generated an error
const FAIL_READING_EEPROM = 0x20;
/// Generic warning about an error on the internal SPI bus
const INTERNAL_SPI_ERROR = 0x30;
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct LastError {
pub motherboard: ErrorCode,
pub daughterboard: Option<ErrorCode>,
}
pub fn parse(data: &[u8]) -> EpsResult<LastError> {
if data.len() == 2 {
Ok(LastError {
motherboard: ErrorCode::from_bits(data[1]).unwrap_or_default(),
daughterboard: None,
})
} else if data.len() == 4 {
Ok(LastError {
motherboard: ErrorCode::from_bits(data[1]).unwrap_or_default(),
daughterboard: ErrorCode::from_bits(data[3]),
})
} else {
return Err(EpsError::parsing_failure("Last Error"));
}
}
pub fn comman | Command {
Command {
cmd: 0x03,
data: vec![0x00],
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_motherboard() {
assert_eq!(
LastError {
motherboard: ErrorCode::BAD_CRC,
daughterboard: None,
},
parse(&vec![0x00, 0x10]).unwrap()
);
}
#[test]
fn test_parse_motherboard_daughterboard() {
assert_eq!(
LastError {
motherboard: ErrorCode::COMMAND_DATA_INCORRECT,
daughterboard: Some(ErrorCode::CHANNEL_INACTIVE),
},
parse(&vec![0x00, 0x02, 0x00, 0x04]).unwrap()
);
}
#[test]
fn test_parse_bad_data_len() {
assert_eq!(
EpsError::parsing_failure("Last Error"),
parse(&vec![]).err().unwrap()
);
}
}
| d() -> |
document_permission_delete_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/sync/v1"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var syncClient *v1.Sync
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
syncClient = twilio.NewWithCredentials(creds).Sync.V1
}
func | () {
err := syncClient.
Service("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Document("ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Permission("test").
Delete()
if err != nil {
log.Panicf("%s", err.Error())
}
log.Println("Document Permission Deleted")
}
| main |
gift.view.tsx | import Taro, { Component } from "@tarojs/taro"
import { Block, View, Image } from "@tarojs/components"
import "./style.styl"
export default class | extends Component<{ data: any; onAction: any }> {
static defaultProps = {
data: {},
onAction: null
}
/**
   * Click event handler
*/
handleClick = (e) => {
const { action } = e.currentTarget.dataset
const { onAction } = this.props
onAction(action)
}
render() {
const { data } = this.props
return (
<Block>
<View className="gift-view">
<View className="gift-wrapper">
<View className="avatar">
<Image className="icon" src={data.cover_image} />
</View>
<View className="description" style="display: inline-block;">
<View className="text">本礼品为邮寄类型</View>
<View className='text'>礼品: {data.title}</View>
<View className="text">{`配送公司: (${data.express_company || "无"})`}</View>
<View className="text">{`配送单号: (${data.express_no || "无"})`}</View>
</View>
<View className="close" data-action="close" onClick={this.handleClick}>
<Image className="icon" src={require("../../../../static/images/ic_close.png")} />
</View>
</View>
</View>
</Block>
)
}
}
| GiftView |
main.rs | // crmps - Template Creator
// Licensed Under Apache 2.0 | // Written By: [Abdul-Muiz-Iqbal](https://www.github.com/Abdul-Muiz-Iqbal)
// API
// TODO:
// - Implement Most Methods
// - Refactor Already Implemented Methods
mod lib;
use std::process;
use lib::*;
fn main() {
let config = match Config::new(std::env::args()) {
Ok(val) => val,
Err(e) => {
eprintln!("Error: {}", e);
process::exit(0);
}
};
let error = match CommandNames::new(&config.command) {
CommandNames::Init => CommandBody::init(config),
CommandNames::Pack => CommandBody::zip(config),
CommandNames::Unpack => CommandBody::unzip(config),
CommandNames::Tag => CommandBody::tag(config),
CommandNames::Untag => CommandBody::untag(config),
CommandNames::AddTemplate => CommandBody::add(config),
CommandNames::RemoveTemplate => CommandBody::rem(config),
CommandNames::Search => CommandBody::search(config),
CommandNames::Nil => {
eprintln!("Command Not Found");
std::process::exit(0)
}
};
if let Err(e) = error {
eprintln!("Application Error: {:#?}", e);
std::process::exit(0);
}
} | |
batched_store_blob_access.go | package blobstore
import (
"context"
"io"
"sync"
"github.com/buildbarn/bb-storage/pkg/blobstore"
"github.com/buildbarn/bb-storage/pkg/util"
)
type pendingPutOperation struct {
digest *util.Digest
sizeBytes int64
r io.ReadCloser
}
type batchedStoreBlobAccess struct {
blobstore.BlobAccess
blobKeyFormat util.DigestKeyFormat
batchSize int
lock sync.Mutex
pendingPutOperations map[string]pendingPutOperation
}
// NewBatchedStoreBlobAccess is an adapter for BlobAccess that causes
// Put() operations to be enqueued. When a sufficient number of
// operations are enqueued, a FindMissing() call is generated to
// determine which blobs actually need to be stored. Writes for blobs
// with the same digest are merged.
//
// This adapter may be used by the worker to speed up the uploading
// phase of actions.
func | (blobAccess blobstore.BlobAccess, blobKeyFormat util.DigestKeyFormat, batchSize int) (blobstore.BlobAccess, func(ctx context.Context) error) {
ba := &batchedStoreBlobAccess{
BlobAccess: blobAccess,
blobKeyFormat: blobKeyFormat,
batchSize: batchSize,
pendingPutOperations: map[string]pendingPutOperation{},
}
return ba, func(ctx context.Context) error {
ba.lock.Lock()
defer ba.lock.Unlock()
return ba.flushLocked(ctx)
}
}
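// Usage sketch (illustrative, not part of the original file; "underlying",
// "keyFormat", "blobs" and "ctx" are hypothetical placeholders):
//
//	ba, flush := NewBatchedStoreBlobAccess(underlying, keyFormat, 100)
//	for _, b := range blobs {
//		_ = ba.Put(ctx, b.digest, b.sizeBytes, b.reader)
//	}
//	// flush issues FindMissing() and uploads only the blobs that are absent.
//	if err := flush(ctx); err != nil {
//		// handle batch upload failure
//	}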
func (ba *batchedStoreBlobAccess) flushLocked(ctx context.Context) error {
// Determine which blobs are missing.
var digests []*util.Digest
for _, pendingPutOperation := range ba.pendingPutOperations {
digests = append(digests, pendingPutOperation.digest)
}
missing, err := ba.BlobAccess.FindMissing(ctx, digests)
if err != nil {
return err
}
// Upload the missing ones.
for _, digest := range missing {
key := digest.GetKey(ba.blobKeyFormat)
if pendingPutOperation, ok := ba.pendingPutOperations[key]; ok {
delete(ba.pendingPutOperations, key)
if err := ba.BlobAccess.Put(ctx, pendingPutOperation.digest, pendingPutOperation.sizeBytes, pendingPutOperation.r); err != nil {
return err
}
}
}
// Discard the others.
for _, pendingPutOperation := range ba.pendingPutOperations {
pendingPutOperation.r.Close()
}
ba.pendingPutOperations = map[string]pendingPutOperation{}
return nil
}
func (ba *batchedStoreBlobAccess) Put(ctx context.Context, digest *util.Digest, sizeBytes int64, r io.ReadCloser) error {
// First flush the existing files if there are too many pending.
ba.lock.Lock()
defer ba.lock.Unlock()
if len(ba.pendingPutOperations) >= ba.batchSize {
if err := ba.flushLocked(ctx); err != nil {
r.Close()
return err
}
}
// Discard duplicate writes.
key := digest.GetKey(ba.blobKeyFormat)
if _, ok := ba.pendingPutOperations[key]; ok {
return r.Close()
}
ba.pendingPutOperations[key] = pendingPutOperation{
digest: digest,
sizeBytes: sizeBytes,
r: r,
}
return nil
}
| NewBatchedStoreBlobAccess |
messagesUsage.qunit.js | /* global sinon, QUnit*/
sap.ui.require([
'sap/ui/model/Model',
'sap/ui/core/message/Message',
'sap/ui/core/library',
'sap/ui/core/ComponentContainer',
'sap/ui/model/json/JSONModel',
'sap/ui/core/UIComponent'
], function(Model, Message, library, ComponentContainer, JSONModel, UIComponent){
"use strict";
var oModel;
// create content div
var oDIV = document.createElement("div");
oDIV.id = "content";
document.body.appendChild(oDIV);
function spyDataState(oControl, fnTest) {
if (oControl.refreshDataState) {
var fnRefresh = oControl.refreshDataState;
oControl.refreshDataState = function(sName, oDataState) {
sap.m.Input.prototype.refreshDataState.apply(oControl, arguments);
fnTest(sName, oDataState);
oControl.refreshDataState = fnRefresh;
};
}
}
//create some components for testing
var oCompCont = new ComponentContainer("CompCont", {
name: "components",
id: "myMessageTest1"
});
var oCompCont2 = new ComponentContainer("CompCont2", {
name: "components.enabled",
id: "myMessageTest2",
handleValidation: true
});
var oCompCont3 = new ComponentContainer("CompCont3", {
name: "components.disabled",
id: "myMessageTest3",
handleValidation: true
});
oCompCont.placeAt("content");
oCompCont2.placeAt("content");
oCompCont3.placeAt("content");
var initModel = function(sType) {
if (sType === "json") {
oModel = new JSONModel();
var oData = {
form: {
firstname: "Fritz",
lastname: "Heiner",
street: "im",
nr: 1,
zip: "12345"
}
};
oModel.setData(oData);
}
sap.ui.getCore().setModel(oModel);
};
QUnit.module("MessageManager components", {
beforeEach : function() {
initModel("json");
},
afterEach : function() {
oModel.destroy();
}
});
QUnit.test("componentEnabled", function(assert) {
var done = assert.async(); | assert.ok(oCompZip.getValueState() === library.ValueState.Error, 'Input: ValueState set correctly');
assert.ok(oCompZip.getValueStateText() === 'Enter a value with no more than 5 characters', 'Input: ValueStateText set correctly');
});
var oCoreValHandler = function(oEvent) {
assert.ok(false,"should never be called");
};
sap.ui.getCore().attachValidationError(oCoreValHandler);
oCompZip.setValue('123456');
jQuery.sap.delayedCall(0, this, function() {
spyDataState(oCompZip, function(sName, oDataState) {
assert.ok(oDataState.getMessages().length == 0, 'Validation Message deleted');
assert.ok(oCompZip.getValueState() === library.ValueState.None, 'Input: ValueState set correctly');
assert.ok(oCompZip.getValueStateText() === '', 'Input: ValueStateText set correctly');
done();
});
oCompZip.setValue('12345');
sap.ui.getCore().detachValidationError(oCoreValHandler);
});
});
QUnit.test("componentDisabled", function(assert) {
var oMessageManager = sap.ui.getCore().getMessageManager();
var oMessageModel = oMessageManager.getMessageModel();
var oCompZip = sap.ui.getCore().byId("zip_disabled");
var oValHandler = function(oEvent) {
if (oEvent.getParameter("dataState").getMessages() && oEvent.getParameter("dataState").getMessages().length > 0) {
assert.ok(false,"should never be called");
}
};
oCompZip.getBinding("value").attachDataStateChange(oValHandler);
sap.ui.getCore().attachValidationError(oValHandler);
oCompZip.setValue('123456');
assert.ok(jQuery.isPlainObject(oMessageModel.getObject('/')) || oMessageModel.getObject('/').length == 0, 'No Messages in Model');
sap.ui.getCore().detachValidationError(oValHandler);
});
QUnit.test("component handle validation undefined", function(assert) {
var oMessageManager = sap.ui.getCore().getMessageManager();
var oMessageModel = oMessageManager.getMessageModel();
var oCompZip = sap.ui.getCore().byId("zip");
var oChangeHandler = function(oEvent) {
if (oEvent.getParameter("dataState").getMessages() && oEvent.getParameter("dataState").getMessages().length > 0) {
assert.ok(false,"should never be called");
}
};
var oValHandler = function(oEvent) {
assert.ok(true,oEvent.sId);
sap.ui.getCore().detachValidationError(oValHandler);
};
oCompZip.getBinding("value").attachDataStateChange(oChangeHandler);
sap.ui.getCore().attachValidationError(oValHandler);
oCompZip.setValue('123456');
assert.ok(jQuery.isPlainObject(oMessageModel.getObject('/')) || oMessageModel.getObject('/').length == 0, 'No Messages in Model');
});
QUnit.module("Component: handleValidation / registerObject");
QUnit.test("Metadata: n/a, instance: n/a", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.na.na";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {}
});
var oComponent = sap.ui.component({
name: sComponentName
});
sinon.assert.callCount(oRegisterObjectSpy, 0);
oComponent.destroy();
});
QUnit.test("Metadata: n/a, instance: false", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.na.false";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {}
});
var oComponent = sap.ui.component({
name: sComponentName,
handleValidation: false
});
sinon.assert.callCount(oRegisterObjectSpy, 0);
oComponent.destroy();
});
QUnit.test("Metadata: n/a, instance: true", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.na.true";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {}
});
var oComponent = sap.ui.component({
name: sComponentName,
handleValidation: true
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, true);
});
QUnit.test("Metadata: false, instance: false", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.false.false";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {
handleValidation: false
}
});
var oComponent = sap.ui.component({
name: sComponentName,
handleValidation: false
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, false);
});
QUnit.test("Metadata: false, instance: n/a", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.false.na";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {
handleValidation: false
}
});
var oComponent = sap.ui.component({
name: sComponentName
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, false);
});
QUnit.test("Metadata: false, instance: true", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.false.true";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {
handleValidation: false
}
});
var oComponent = sap.ui.component({
name: sComponentName,
handleValidation: true
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, false);
});
QUnit.test("Metadata: true, instance: true", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.true.true";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {
handleValidation: true
}
});
var oComponent = sap.ui.component({
name: sComponentName,
handleValidation: true
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, true);
});
QUnit.test("Metadata: true, instance: n/a", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.true.na";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {
handleValidation: true
}
});
var oComponent = sap.ui.component({
name: sComponentName
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, true);
});
QUnit.test("Metadata: true, instance: false", function(assert) {
var sComponentName = "sap.ui.test.handlevalidation.true.false";
var oMessageManager = sap.ui.getCore().getMessageManager();
var oRegisterObjectSpy = this.spy(oMessageManager, "registerObject");
jQuery.sap.declare(sComponentName + ".Component");
UIComponent.extend(sComponentName + ".Component", {
metadata: {
handleValidation: true
}
});
var oComponent = sap.ui.component({
name: sComponentName,
handleValidation: false
});
sinon.assert.callCount(oRegisterObjectSpy, 1);
sinon.assert.calledWithExactly(oRegisterObjectSpy, oComponent, true);
});
QUnit.test("Model: checkMessages", function(assert) {
var oCheckMessagesSpy = sinon.spy(Model.prototype, "checkMessages");
var oModel = new Model();
var mMessages = {"foo": {"key1": "value1"}};
oModel.mMessages = mMessages;
oModel.setMessages(mMessages);
assert.equal(oCheckMessagesSpy.callCount, 0, "No changes detected - Skip check messages");
oModel.setMessages({"foo": {"key2": "value2"}});
assert.equal(oCheckMessagesSpy.callCount, 1, "Changes detected - Check messages");
oModel.setMessages();
assert.equal(oCheckMessagesSpy.callCount, 2, "Changes detected - Check messages");
assert.deepEqual(oModel.mMessages, {}, "Model messages cleared");
});
QUnit.test("Model: Refresh with force update", function(assert) {
var done = assert.async();
var oModel = new Model();
var oMessage = new Message({message: "myMessage", type: library.MessageType.Error});
oModel.setMessages({"/test": oMessage});
oModel.attachMessageChange(function(oEvent){
assert.strictEqual(oMessage, oEvent.getParameter("oldMessages")[0]);
done();
});
oModel.refresh(true);
});
}); | var oCompZip = sap.ui.getCore().byId("zip_enabled");
spyDataState(oCompZip, function(sName, oDataState) {
assert.ok(oDataState.getMessages().length == 1, 'Format Message created'); |
clustering.py | """
Copyright 2022 RICHARD TJÖRNHAMMAR
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import typing
import sys
try :
from numba import jit
bUseNumba = True
except ImportError :
print ( "ImportError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
except OSError:
print ( "OSError:"," NUMBA. WILL NOT USE IT")
bUseNumba = False
# THE FOLLOWING KMEANS ALGORITHM IS THE AUTHOR OWN LOCAL VERSION
if bUseNumba :
@jit(nopython=True)
def seeded_kmeans( dat:np.array, cent:np.array ) :
#
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2345
# AGAIN CONSIDER USING THE C++ VERSION SINCE IT IS ALOT FASTER
# HERE WE SPEED IT UP USING NUMBA IF THE USER HAS IT INSTALLED AS A MODULE
#
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
# START BC
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
# END BC
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels , centroids )
else :
def seeded_kmeans( dat:np.array, cent:np.array ) :
#
# SLOW SLUGGISH KMEANS WITH A DUBBLE FOR LOOP
# IN PYTHON! WOW! SUCH SPEED!
#
NN , MM = np.shape ( dat )
KK , LL = np.shape ( cent )
if not LL == MM :
print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )
labels = [ int(z) for z in np.zeros(NN) ]
w = labels
counts = np.zeros(KK)
tmp_ce = np.zeros(KK*MM).reshape(KK,MM)
old_error , error , TOL = 0. , 1. , 1.0E-10
while abs ( error - old_error ) > TOL :
old_error = error
error = 0.
counts = counts * 0.
tmp_ce = tmp_ce * 0.
# START BC
for h in range ( NN ) :
min_distance = 1.0E30
for i in range ( KK ) :
distance = np.sum( ( dat[h]-cent[i] )**2 )
if distance < min_distance :
labels[h] = i
min_distance = distance
tmp_ce[labels[h]] += dat[ h ]
counts[labels[h]] += 1.0
error += min_distance
# END BC
for i in range ( KK ) :
if counts[i]>0:
cent[i] = tmp_ce[i]/counts[i]
centroids = cent
return ( labels , centroids )
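# Illustrative sketch (not part of the original module): running the seeded k-means
# above on a small hypothetical 2-D data set with two seed centroids.
#
#   dat  = np.array( [ [0.,0.], [0.,1.], [10.,10.], [10.,11.] ] )
#   cent = np.array( [ [0.,0.], [10.,10.] ] )
#   labels , centroids = seeded_kmeans ( dat , cent )
#   # labels -> [0, 0, 1, 1] ; centroids -> the two cluster means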
if bUseNumba :
@jit(nopython=True)
def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :
description = """ This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems."""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2277
# CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS
# A LOT FASTER
# FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# ADDED TO RICHTOOLS HERE: https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
print ( 'ERROR: FAILED' )
N = mr_sq
res , nvisi, s, NN, ndx, C = [0], [0], [0], [0], [0], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
res = res[1:]
nvisi = nvisi[1:]
ndx = ndx[1:]
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
# back pop_back
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose : # VERBOSE
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose :
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
else :
def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :
description="""
        This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems.
"""
if bVerbose :
print ( "CONNECTIVITY CLUSTERING OF ", np.shape(B), " MATRIX" )
# PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN
# https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# AROUND LINE 2277
# CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS
# A LOT FASTER
# FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#
nr_sq,mr_sq = np.shape(B)
if nr_sq != mr_sq :
            print ( 'ERROR: INPUT IS NOT A SQUARE DISTANCE MATRIX' )
return ( -1 )
N = mr_sq
res , nvisi, s, NN, ndx, C = [], [], [], [], [], 0
res .append(0)
for i in range(N) :
nvisi.append(i+1)
res.append(0); res.append(0)
ndx.append(i)
while ( len(ndx)>0 ) :
i = ndx[-1] ; ndx = ndx[:-1]
NN = []
if ( nvisi[i]>0 ) :
C-=1
for j in range(N) :
if ( B[i,j]<=val ) :
NN.append(j)
while ( len(NN)>0 ) :
# back pop_back
k = NN[-1]; NN = NN[:-1]
nvisi[k] = C
for j in range(N):
if ( B[j,k]<=val ) :
for q in range(N) :
if ( nvisi[q] == j+1 ) :
NN.append(q)
if bVerbose : # VERBOSE
print ( "INFO "+str(-1*C) +" clusters" )
Nc = [ 0 for i in range(-1*C) ]
for q in range(N) :
res[ q*2+1 ] = q;
res[ q*2 ] = nvisi[q]-C;
Nc [res[q*2]]+= 1;
if bVerbose :
print ( " "+str(res[q*2])+" "+str(res[2*q+1]) )
if bVerbose:
for i in range(-1*C) :
print( "CLUSTER " +str(i)+ " HAS " + str(Nc[i]) + " ELEMENTS")
return ( Nc , np.array(res[:-1]).reshape(-1,2) )
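# Hedged usage sketch (illustrative only, not part of the module): connectivity takes a
# square distance matrix and a cutoff; entries <= cutoff are treated as connected.
#
#   import numpy as np
#   D = np.array([[0., 1., 5.],
#                 [1., 0., 5.],
#                 [5., 5., 0.]])
#   Nc, pairs = connectivity(D, 2.0)   # Nc -> [2, 1]: cluster {0, 1} and cluster {2}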
if bUseNumba :
@jit(nopython=True)
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
#
# AN ALTERNATIVE METHOD
# DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY
# CLUSTERING MODULE (in src/impetuous/clustering.py )
# OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
# THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS
# WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER
#
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
else :
def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :
#
# AN ALTERNATIVE METHOD
# DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY
# CLUSTERING MODULE (in src/impetuous/clustering.py )
# OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
# as of commit https://github.com/richardtjornhammar/RichTools/commit/76201bb07687017ae16a4e57cb1ed9fd8c394f18 2016
# CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY
#
# THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS
# WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER
#
if len ( distm.shape ) < 2 :
print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
def b2i ( a:list ) -> list :
return ( [ i for b,i in zip(a,range(len(a))) if b ] )
def f2i ( a:list,alf:float ) -> list :
return ( b2i( a<=alf ) )
L = []
for a in distm :
bAdd = True
ids = set( f2i(a,alpha) )
for i in range(len(L)) :
if len( L[i]&ids ) >= n_connections :
L[i] = L[i] | ids
bAdd = False
break
if bAdd and len(ids) >= n_connections :
L .append( ids )
return ( L )
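# Hedged usage sketch (illustrative only): connectedness returns a list of index sets,
# one per cluster, for the same kind of cutoff-based clustering as connectivity().
#
#   import numpy as np
#   D = np.array([[0., 1., 5.],
#                 [1., 0., 5.],
#                 [5., 5., 0.]])
#   connectedness(D, 2.0)   # -> [{0, 1}, {2}]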
def dbscan ( coordinates:np.array = None , distance_matrix:np.array = None ,
eps:float = None, minPts:int = None , bVerbose:bool = False ) -> dict :
def absolute_coordinates_to_distance_matrix ( Q:np.array , power:int=2 , bInvPow:bool=False ) -> np.array :
# UNUSED FALLBACK
D |
if bVerbose :
print ( "THIS IMPLEMENTATION FOR DBSCAN" )
print ( "ASSESSMENT OF NOISE DIFFERS FROM" )
print ( "THE IMPLEMENTATION FOUND IN SKLEARN" )
print ( "ASSUMES LINEAR DISTANCES, NOT SQUARED" )
#
# FOR A DESCRIPTION OF THE CONNECTIVITY READ PAGE 30 (16 INTERNAL NUMBERING) of:
# https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf
#from impetuous.clustering import absolute_coordinates_to_distance_matrix
#from impetuous.clustering import connectivity
import operator
if not operator.xor( coordinates is None , distance_matrix is None ) :
        print ( "ONLY SUPPLY A SINGLE DATA FRAME OR A DISTANCE MATRIX" )
print ( "dbscan FAILED" )
        print ( "DATA MATRICES NEED TO BE SPECIFIED WITH \" distance_matrix = ... \" " )
exit(1)
if distance_matrix is None :
from graphtastic.fit import absolute_coordinates_to_distance_matrix
distance_matrix_ = absolute_coordinates_to_distance_matrix ( coordinates )
eps = eps**2.0
else :
distance_matrix_ = distance_matrix
isNoise = np.sum(distance_matrix_<eps,0)-1 < minPts
i_ = 0
for ib in isNoise :
if ib :
distance_matrix_ [ i_] = ( 1+eps )*10.0
distance_matrix_.T[i_] = ( 1+eps )*10.0
distance_matrix_[i_][i_] = 0.
i_ = i_+1
clustercontent , clustercontacts = connectivity ( distance_matrix_ , eps )
return ( {'cluster content': clustercontent, 'clusterid-particleid' : clustercontacts, 'is noise':isNoise} )
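# Hedged usage sketch (illustrative only; the coordinate values, eps and minPts below are
# assumptions, not defaults):
#
#   import numpy as np
#   X = np.array([[0.0, 0.0], [0.0, 0.1], [10.0, 10.0]])
#   res = dbscan(coordinates=X, eps=0.5, minPts=1)
#   # res has the keys 'cluster content', 'clusterid-particleid' and 'is noise';
#   # reformat_dbscan_results(res) below regroups it as {cluster_id: [particle ids]},
#   # with noise points collected under the key -1.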
def reformat_dbscan_results ( results:dict ) -> dict :
if True :
clusters = {}
for icontent in range(len(results['cluster content'])) :
content = results[ 'cluster content' ][ icontent ]
for c in results [ 'clusterid-particleid' ] :
if c[0] == icontent :
if results[ 'is noise' ][c[1]] :
icontent=-1
if icontent in clusters:
clusters[ icontent ] .append( c[1] )
else :
clusters[ icontent ] = [ c[1] ]
return ( clusters )
| P = np.array( [ np.sum((np.array(p)-np.array(q))**power) for p in Q for q in Q] ).reshape(np.shape(Q)[0],np.shape(Q)[0])
if bInvPow :
DP = DP**(1.0/power)
return ( DP )
|
04b-bind-body.go | package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/guonaihong/gout"
"time"
)
// BindBody can be used to parse an unstructured HTTP response body (structured meaning json/xml/yaml, etc.)
// Basic types can be bound automatically; below are examples for string/[]byte/int
func bindString() {
	// 1. Parse string
fmt.Printf("\n\n=========1. bind string=====\n")
s := ""
err := gout.GET(":8080/rsp/body/string").
Debug(true).
BindBody(&s).
Do()
if err != nil {
fmt.Printf("%s\n", err)
return
}
fmt.Printf("need(string) got(%s)\n", s)
}
func bindBytes() {
	// 2. Parse []byte
fmt.Printf("\n\n=========2. bind []byte=====\n")
var b []byte
err := gout.GET(":8080/rsp/body/bytes").
Debug(true).
BindBody(&b).
Do()
if err != nil {
fmt.Printf("%s\n", err)
return
}
fmt.Printf("need(bytes) got(%s)\n", b)
}
func bindInt() {
	// 3. Parse int
fmt.Printf("\n\n=========3. bind int=====\n")
i := 0
err := gout.GET(":8080/rsp/body/int").
Debug(true).
BindBody(&i).
Do()
if err != nil {
fmt.Printf("%s\n", err)
return
}
fmt.Printf("need(65535) got(%d)\n", i)
	// More basic types supported by BindBody are int, int8, int16, int32, int64
//uint, uint8, uint16, uint32, uint64
//float32, float64
}
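// Hedged sketch (not part of the original examples; the endpoint name is an assumption):
// by the same pattern, a numeric body such as "3.14" can be bound into a float64,
// since BindBody also supports float32/float64 as noted above.
//
//	func bindFloat() {
//		f := 0.0
//		err := gout.GET(":8080/rsp/body/float").BindBody(&f).Do()
//		if err != nil {
//			fmt.Printf("%s\n", err)
//			return
//		}
//		fmt.Printf("got(%f)\n", f)
//	}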
func main() {
go server()
time.Sleep(time.Millisecond)
bindString()
bindBytes()
bindInt()
}
func server() {
router := gin.New()
router.GET("/rsp/body/bytes", func(c *gin.Context) {
c.String(200, "bytes" | )
})
router.GET("/rsp/body/string", func(c *gin.Context) {
c.String(200, "string")
})
router.GET("/rsp/body/int", func(c *gin.Context) {
c.String(200, "65535")
})
router.Run()
}
|
|
stringer.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
// interface. Given the name of a (signed or unsigned) integer type T that has constants
// defined, stringer will create a new self-contained Go source file implementing
// func (t T) String() string
// The file is created in the same package and directory as the package that defines T.
// It has helpful defaults designed for use with go generate.
//
// Stringer works best with constants that are consecutive values such as created using iota,
// but creates good code regardless. In the future it might also provide custom support for
// constant sets that are bit patterns.
//
// For example, given this snippet,
//
// package painkiller
//
// type Pill int
//
// const (
// Placebo Pill = iota
// Aspirin
// Ibuprofen
// Paracetamol
// Acetaminophen = Paracetamol
// )
//
// running this command
//
// stringer -type=Pill
//
// in the same directory will create the file pill_string.go, in package painkiller,
// containing a definition of
//
// func (Pill) String() string
//
// That method will translate the value of a Pill constant to the string representation
// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will
// print the string "Aspirin".
//
// Typically this process would be run using go generate, like this:
//
// //go:generate stringer -type=Pill
//
// If multiple constants have the same value, the lexically first matching name will
// be used (in the example, Acetaminophen will print as "Paracetamol").
//
// With no arguments, it processes the package in the current directory.
// Otherwise, the arguments must name a single directory holding a Go package
// or a set of Go source files that represent a single Go package.
//
// The -type flag accepts a comma-separated list of types so a single run can
// generate methods for multiple types. The default output file is t_string.go,
// where t is the lower-cased name of the first type listed. It can be overridden
// with the -output flag.
//
package main
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/types"
_ "golang.org/x/tools/go/gcimporter"
)
var (
typeNames = flag.String("type", "", "comma-separated list of type names; must be set")
output = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
)
// Usage is a replacement usage function for the flags package.
func Usage() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n")
	fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n")
fmt.Fprintf(os.Stderr, "For more information, see:\n")
fmt.Fprintf(os.Stderr, "\thttp://godoc.org/golang.org/x/tools/cmd/stringer\n")
fmt.Fprintf(os.Stderr, "Flags:\n")
flag.PrintDefaults()
os.Exit(2)
}
func main() {
log.SetFlags(0)
log.SetPrefix("stringer: ")
flag.Usage = Usage
flag.Parse()
if len(*typeNames) == 0 {
flag.Usage()
os.Exit(2)
}
types := strings.Split(*typeNames, ",")
// We accept either one directory or a list of files. Which do we have?
args := flag.Args()
if len(args) == 0 {
// Default: process whole package in current directory.
args = []string{"."}
}
// Parse the package once.
var (
dir string
g Generator
)
if len(args) == 1 && isDirectory(args[0]) {
dir = args[0]
g.parsePackageDir(args[0])
} else {
dir = filepath.Dir(args[0])
g.parsePackageFiles(args)
}
// Print the header and package clause.
g.Printf("// generated by stringer %s; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
g.Printf("\n")
g.Printf("package %s", g.pkg.name)
g.Printf("\n")
g.Printf("import \"fmt\"\n") // Used by all methods.
// Run generate for each type.
for _, typeName := range types {
g.generate(typeName)
}
// Format the output.
src := g.format()
// Write to file.
outputName := *output
if outputName == "" {
baseName := fmt.Sprintf("%s_string.go", types[0])
outputName = filepath.Join(dir, strings.ToLower(baseName))
}
err := ioutil.WriteFile(outputName, src, 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
}
// isDirectory reports whether the named file is a directory.
func isDirectory(name string) bool {
info, err := os.Stat(name)
if err != nil {
log.Fatal(err)
}
return info.IsDir()
}
// Generator holds the state of the analysis. Primarily used to buffer
// the output for format.Source.
type Generator struct {
buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
}
func (g *Generator) Printf(format string, args ...interface{}) {
fmt.Fprintf(&g.buf, format, args...)
}
// File holds a single parsed file and associated data.
type File struct {
pkg *Package // Package to which this file belongs.
file *ast.File // Parsed AST.
// These fields are reset for each type being generated.
typeName string // Name of the constant type.
values []Value // Accumulator for constant values of that type.
}
type Package struct {
dir string
name string
defs map[*ast.Ident]types.Object
files []*File
typesPkg *types.Package
}
// parsePackageDir parses the package residing in the directory.
func (g *Generator) parsePackageDir(directory string) {
pkg, err := build.Default.ImportDir(directory, 0)
if err != nil {
log.Fatalf("cannot process directory %s: %s", directory, err)
}
var names []string
names = append(names, pkg.GoFiles...)
names = append(names, pkg.CgoFiles...)
// TODO: Need to think about constants in test files. Maybe write type_string_test.go
// in a separate pass? For later.
// names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
names = append(names, pkg.SFiles...)
names = prefixDirectory(directory, names)
g.parsePackage(directory, names, nil)
}
// parsePackageFiles parses the package occupying the named files.
func (g *Generator) parsePackageFiles(names []string) {
g.parsePackage(".", names, nil)
}
// prefixDirectory places the directory name on the beginning of each name in the list.
func prefixDirectory(directory string, names []string) []string {
if directory == "." {
return names
}
ret := make([]string, len(names))
for i, name := range names {
ret[i] = filepath.Join(directory, name)
}
return ret
}
// parsePackage analyzes the single package constructed from the named files.
// If text is non-nil, it is a string to be used instead of the content of the file,
// to be used for testing. parsePackage exits if there is an error.
func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
var files []*File
var astFiles []*ast.File
g.pkg = new(Package)
fs := token.NewFileSet()
for _, name := range names {
if !strings.HasSuffix(name, ".go") {
continue
}
parsedFile, err := parser.ParseFile(fs, name, text, 0)
if err != nil {
log.Fatalf("parsing package: %s: %s", name, err)
}
astFiles = append(astFiles, parsedFile)
files = append(files, &File{
file: parsedFile,
pkg: g.pkg,
})
}
if len(astFiles) == 0 {
log.Fatalf("%s: no buildable Go files", directory)
}
g.pkg.name = astFiles[0].Name.Name
g.pkg.files = files
g.pkg.dir = directory
// Type check the package.
g.pkg.check(fs, astFiles)
}
// check type-checks the package. The package must be OK to proceed.
func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
pkg.defs = make(map[*ast.Ident]types.Object)
config := types.Config{FakeImportC: true}
info := &types.Info{
Defs: pkg.defs,
}
typesPkg, err := config.Check(pkg.dir, fs, astFiles, info)
if err != nil {
log.Fatalf("checking package: %s", err)
}
pkg.typesPkg = typesPkg
}
// generate produces the String method for the named type.
func (g *Generator) generate(typeName string) {
values := make([]Value, 0, 100)
for _, file := range g.pkg.files {
// Set the state for this run of the walker.
file.typeName = typeName
file.values = nil
if file.file != nil {
ast.Inspect(file.file, file.genDecl)
values = append(values, file.values...)
}
}
if len(values) == 0 {
log.Fatalf("no values defined for type %s", typeName)
}
runs := splitIntoRuns(values)
// The decision of which pattern to use depends on the number of
// runs in the numbers. If there's only one, it's easy. For more than
// one, there's a tradeoff between complexity and size of the data
// and code vs. the simplicity of a map. A map takes more space,
// but so does the code. The decision here (crossover at 10) is
// arbitrary, but considers that for large numbers of runs the cost
// of the linear scan in the switch might become important, and
// rather than use yet another algorithm such as binary search,
// we punt and use a map. In any case, the likelihood of a map
// being necessary for any realistic example other than bitmasks
// is very low. And bitmasks probably deserve their own analysis,
// to be done some other day.
switch {
case len(runs) == 1:
g.buildOneRun(runs, typeName)
case len(runs) <= 10:
g.buildMultipleRuns(runs, typeName)
default:
g.buildMap(runs, typeName)
}
}
// splitIntoRuns breaks the values into runs of contiguous sequences.
// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}.
// The input slice is known to be non-empty.
func splitIntoRuns(values []Value) [][]Value {
// We use stable sort so the lexically first name is chosen for equal elements.
sort.Stable(byValue(values))
// Remove duplicates. Stable sort has put the one we want to print first,
// so use that one. The String method won't care about which named constant
// was the argument, so the first name for the given value is the only one to keep.
// We need to do this because identical values would cause the switch or map
// to fail to compile.
j := 1
for i := 1; i < len(values); i++ {
if values[i].value != values[i-1].value {
values[j] = values[i]
j++
}
}
values = values[:j]
runs := make([][]Value, 0, 10)
for len(values) > 0 {
// One contiguous sequence per outer loop.
i := 1
for i < len(values) && values[i].value == values[i-1].value+1 {
i++
}
runs = append(runs, values[:i])
values = values[i:]
}
return runs
}
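// Illustrative note (values are assumptions): duplicates are collapsed before runs are
// formed, so the constant values 1,1,2,3,7 become the runs {1,2,3} and {7}; for equal
// values only the first entry after the stable sort is kept, and that name is printed.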
// format returns the gofmt-ed contents of the Generator's buffer.
func (g *Generator) format() []byte {
src, err := format.Source(g.buf.Bytes())
if err != nil {
// Should never happen, but can arise when developing this code.
// The user can compile the output to see the error.
log.Printf("warning: internal error: invalid Go generated: %s", err)
log.Printf("warning: compile the package to analyze the error")
return g.buf.Bytes()
}
return src
}
// Value represents a declared constant.
type Value struct {
name string // The name of the constant.
// The value is stored as a bit pattern alone. The boolean tells us
// whether to interpret it as an int64 or a uint64; the only place
// this matters is when sorting.
// Much of the time the str field is all we need; it is printed
// by Value.String.
value uint64 // Will be converted to int64 when needed.
signed bool // Whether the constant is a signed type.
str string // The string representation given by the "go/exact" package.
}
func (v *Value) String() string {
return v.str
}
// byValue lets us sort the constants into increasing order.
// We take care in the Less method to sort in signed or unsigned order,
// as appropriate.
type byValue []Value
func (b byValue) Len() int { return len(b) }
func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byValue) Less(i, j int) bool {
if b[i].signed {
return int64(b[i].value) < int64(b[j].value)
}
return b[i].value < b[j].value
}
// genDecl processes one declaration clause.
func (f *File) genDecl(node ast.Node) bool {
decl, ok := node.(*ast.GenDecl)
if !ok || decl.Tok != token.CONST {
// We only care about const declarations.
return true
}
// The name of the type of the constants we are declaring.
// Can change if this is a multi-element declaration.
typ := ""
// Loop over the elements of the declaration. Each element is a ValueSpec:
// a list of names possibly followed by a type, possibly followed by values.
// If the type and value are both missing, we carry down the type (and value,
// but the "go/types" package takes care of that).
for _, spec := range decl.Specs {
vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
if vspec.Type == nil && len(vspec.Values) > 0 {
// "X = 1". With no type but a value, the constant is untyped.
// Skip this vspec and reset the remembered type.
typ = ""
continue
}
if vspec.Type != nil {
// "X T". We have a type. Remember it.
ident, ok := vspec.Type.(*ast.Ident)
if !ok {
continue
}
typ = ident.Name
}
if typ != f.typeName {
// This is not the type we're looking for.
continue
}
// We now have a list of names (from one line of source code) all being
// declared with the desired type.
// Grab their names and actual values and store them in f.values.
for _, name := range vspec.Names {
if name.Name == "_" {
continue
}
// This dance lets the type checker find the values for us. It's a
// bit tricky: look up the object declared by the name, find its
// types.Const, and extract its value.
obj, ok := f.pkg.defs[name]
if !ok {
log.Fatalf("no value for constant %s", name)
}
info := obj.Type().Underlying().(*types.Basic).Info()
if info&types.IsInteger == 0 {
log.Fatalf("can't handle non-integer constant type %s", typ)
}
value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
if value.Kind() != exact.Int {
log.Fatalf("can't happen: constant is not an integer %s", name)
}
i64, isInt := exact.Int64Val(value)
u64, isUint := exact.Uint64Val(value)
if !isInt && !isUint {
log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
}
if !isInt {
u64 = uint64(i64)
}
v := Value{
name: name.Name,
value: u64,
signed: info&types.IsUnsigned == 0,
str: value.String(),
}
f.values = append(f.values, v)
}
}
return false
}
// Helpers
// usize returns the number of bits of the smallest unsigned integer
// type that will hold n. Used to create the smallest possible slice of
// integers to use as indexes into the concatenated strings.
func usize(n int) int {
switch {
case n < 1<<8:
return 8
case n < 1<<16:
return 16
default:
// 2^32 is enough constants for anyone.
return 32
}
}
// declareIndexAndNameVars declares the index slices and concatenated names
// strings representing the runs of values.
func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) {
var indexes, names []string
for i, run := range runs {
index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i))
indexes = append(indexes, index)
names = append(names, name)
}
g.Printf("const (\n")
for _, name := range names {
g.Printf("\t%s\n", name)
}
g.Printf(")\n\n")
g.Printf("var (")
for _, index := range indexes {
g.Printf("\t%s\n", index)
}
g.Printf(")\n\n")
}
// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars
func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) {
index, name := g.createIndexAndNameDecl(run, typeName, "")
g.Printf("const %s\n", name)
g.Printf("var %s\n", index)
}
// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var".
func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) {
b := new(bytes.Buffer)
indexes := make([]int, len(run))
for i := range run {
b.WriteString(run[i].name)
indexes[i] = b.Len()
}
nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String())
nameLen := b.Len()
b.Reset()
fmt.Fprintf(b, "_%s_index%s = [...]uint%d{", typeName, suffix, usize(nameLen))
for i, v := range indexes {
if i > 0 {
fmt.Fprintf(b, ", ")
}
fmt.Fprintf(b, "%d", v)
}
fmt.Fprintf(b, "}")
return b.String(), nameConst
}
// declareNameVars declares the concatenated names string representing all the values in the runs.
func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) {
g.Printf("const _%s_name%s = \"", typeName, suffix)
for _, run := range runs {
for i := range run {
g.Printf("%s", run[i].name)
}
}
g.Printf("\"\n")
}
// buildOneRun generates the variables and String method for a single run of contiguous values.
func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
values := runs[0]
g.Printf("\n")
g.declareIndexAndNameVar(values, typeName)
// The generated code is simple enough to write as a Printf format.
lessThanZero := ""
if values[0].signed {
lessThanZero = "i < 0 || "
}
if values[0].value == 0 { // Signed or unsigned, 0 is still 0.
g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero)
} else {
g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero)
}
}
// Arguments to format are:
// [1]: type name
// [2]: size of index element (8 for uint8 etc.)
// [3]: less than zero check (for signed types)
const stringOneRun = `func (i %[1]s) String() string {
if %[3]si >= %[1]s(len(_%[1]s_index)) {
return fmt.Sprintf("%[1]s(%%d)", i)
}
hi := _%[1]s_index[i]
lo := uint%[2]d(0)
if i > 0 {
lo = _%[1]s_index[i-1]
}
return _%[1]s_name[lo:hi]
}
`
// Arguments to format are:
// [1]: type name
// [2]: lowest defined value for type, as a string
// [3]: size of index element (8 for uint8 etc.)
// [4]: less than zero check (for signed types)
/*
*/
const stringOneRunWithOffset = `func (i %[1]s) String() string {
i -= %[2]s
if %[4]si >= %[1]s(len(_%[1]s_index)) {
return fmt.Sprintf("%[1]s(%%d)", i + %[2]s)
}
hi := _%[1]s_index[i]
lo := uint%[3]d(0)
if i > 0 {
lo = _%[1]s_index[i-1]
}
return _%[1]s_name[lo : hi]
}
`
// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values.
// For this pattern, a single Printf format won't do.
func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
g.Printf("\n")
g.declareIndexAndNameVars(runs, typeName)
g.Printf("func (i %s) String() string {\n", typeName)
g.Printf("\tswitch {\n")
for i, values := range runs {
if len(values) == 1 {
g.Printf("\tcase i == %s:\n", &values[0])
g.Printf("\t\treturn _%s_name_%d\n", typeName, i)
continue
}
g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
if values[0].value != 0 {
g.Printf("\t\ti -= %s\n", &values[0])
}
g.Printf("\t\tlo := uint%d(0)\n", usize(len(values)))
g.Printf("\t\tif i > 0 {\n")
g.Printf("\t\t\tlo = _%s_index_%d[i-1]\n", typeName, i)
g.Printf("\t\t}\n") | }
g.Printf("\tdefault:\n")
g.Printf("\t\treturn fmt.Sprintf(\"%s(%%d)\", i)\n", typeName)
g.Printf("\t}\n")
g.Printf("}\n")
}
// buildMap handles the case where the space is so sparse a map is a reasonable fallback.
// It's a rare situation but has simple code.
func (g *Generator) buildMap(runs [][]Value, typeName string) {
g.Printf("\n")
g.declareNameVars(runs, typeName, "")
g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName)
n := 0
for _, values := range runs {
for _, value := range values {
g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name))
n += len(value.name)
}
}
g.Printf("}\n\n")
g.Printf(stringMap, typeName)
}
// Argument to format is the type name.
const stringMap = `func (i %[1]s) String() string {
if str, ok := _%[1]s_map[i]; ok {
return str
}
return fmt.Sprintf("%[1]s(%%d)", i)
}
` | g.Printf("\t\treturn _%s_name_%d[lo:_%s_index_%d[i]]\n", typeName, i, typeName, i) |
reports.module.ts | import { Module } from '@nestjs/common';
import { ReportsResolver } from './reports.resolver';
import { ReportsService } from './reports.service';
| })
export class ReportsModule {} | @Module({
providers: [ReportsResolver, ReportsService] |
math.ts | import { Rect } from '@shopify/javascript-utilities/geometry';
export type PreferredPosition = 'above' | 'below' | 'belowRight' | 'belowLeft' | 'left' | 'right' | 'mostSpace';
export type PreferredAlignment = 'left' | 'center' | 'right';
export interface Margins {
activator: number;
container: number;
horizontal: number;
}
// Function used to calculate vertical position of overlay
export function calculateVerticalPosition(
activatorRect: Rect,
overlayRect: Rect,
overlayMargins: Margins,
scrollableContainerRect: Rect,
containerRect: Rect,
preferredPosition: PreferredPosition,
fixed: boolean | undefined
) {
const activatorTop = activatorRect.top;
const activatorBottom = activatorTop + activatorRect.height;
const spaceAbove = activatorRect.top;
const spaceBelow = containerRect.height - activatorRect.top - activatorRect.height;
const desiredHeight = overlayRect.height;
const verticalMargins = overlayMargins.activator + overlayMargins.container;
const minimumSpaceToScroll = overlayMargins.container;
const distanceToTopScroll = activatorRect.top - Math.max(scrollableContainerRect.top, 0);
const distanceToBottomScroll = containerRect.top +
Math.min(containerRect.height,
scrollableContainerRect.top + scrollableContainerRect.height
) -
(activatorRect.top + activatorRect.height);
const enoughSpaceFromTopScroll = distanceToTopScroll >= minimumSpaceToScroll;
const enoughSpaceFromBottomScroll = distanceToBottomScroll >= minimumSpaceToScroll;
const heightIfBelow = Math.min(spaceBelow, desiredHeight);
const heightIfAbove = Math.min(spaceAbove, desiredHeight);
const containerRectTop = fixed ? 0 : containerRect.top;
  // Calculate dimensions of the overlay when it needs to be displayed on top of / above the dropdown
const positionIfAbove = {
height: heightIfAbove - verticalMargins,
top: activatorTop + containerRectTop - heightIfAbove,
positioning: 'above',
};
  // Calculate dimensions of the overlay when it needs to be displayed at the bottom of / below the dropdown
const positionIfBelow = {
height: heightIfBelow - verticalMargins,
top: activatorBottom + containerRectTop,
positioning: 'below',
};
  // Calculate dimensions of the overlay when it needs to be displayed on the left / right side of the dropdown
const positionIfLeftOrRight = {
height: overlayRect.height,
top: positionIfBelow.top - activatorRect.height - ((overlayRect.height - activatorRect.height) / 2),
positioning: 'below',
};
if (preferredPosition === 'above') return positionIfAbove;
if (preferredPosition === 'below') return positionIfBelow;
if (preferredPosition === 'right' || preferredPosition === 'left') {
return positionIfLeftOrRight;
}
if (enoughSpaceFromTopScroll && enoughSpaceFromBottomScroll) {
return spaceAbove > spaceBelow ? positionIfAbove : positionIfBelow;
}
return distanceToTopScroll > minimumSpaceToScroll
? positionIfAbove
: positionIfBelow;
}
// Function used to calculate horizontal position of overlay
export function calculateHorizontalPosition(
activatorRect: Rect,
overlayRect: Rect,
containerRect: Rect,
overlayMargins: Margins,
preferredAlignment: PreferredAlignment,
preferredPosition: PreferredPosition,
preloadedPopover: boolean
) {
const maximum = containerRect.width - overlayRect.width;
// Define when overlay needs to be displayed left aligned with dropdown's left side
if (preferredAlignment === 'left') {
return 0;
  // Define when overlay needs to be displayed right aligned with dropdown's right side
} if (preferredAlignment === 'right') {
return -(overlayRect.width - activatorRect.width - 16);
}
if (preferredAlignment === 'center' && (preferredPosition === 'below' || preferredPosition === 'above')) {
return Math.min(
maximum,
Math.max(0, activatorRect.center.x - overlayRect.width / 2)
);
}
// Define when overlay needs to be displayed center aligned with dropdown node
if (preferredAlignment === 'center') {
return -(overlayRect.width - activatorRect.width - 16) / 2 ;
// Define when overlay needs to be displayed center aligned with dropdown node
}
if (preferredAlignment === 'center' && (preferredPosition === 'above' || preferredPosition === 'below') && !preloadedPopover) {
return Math.min(
maximum,
Math.max(
0,
activatorRect.left - ((activatorRect.width - overlayRect.width) / 2)
)
);
// Define when overlay needs to be displayed left or right side of dropdown
} if (preferredPosition === 'left' || preferredPosition === 'right') {
return Math.min(
maximum,
Math.max(0, preferredPosition === 'right' ? (activatorRect.center.x + activatorRect.width / 2) : (activatorRect.center.x - (activatorRect.width) - (activatorRect.width / 2)))
);
}
return Math.min(
maximum,
Math.max(0, activatorRect.center.x - overlayRect.width / 2)
);
}
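// Hedged worked example (the numbers are assumptions): for preferredAlignment 'center' and
// preferredPosition 'below', an activator centered at x = 150 with a 200px-wide overlay in
// a 400px-wide container yields Math.min(400 - 200, Math.max(0, 150 - 200 / 2)) = 50.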
export function | (inner: Rect, outer: Rect) {
const { center } = inner;
return center.y < outer.top || center.y > outer.top + outer.height;
}
| rectIsOutsideOfRect |
mmp_mcss_objects.py | ###################################################################
""" Summary: Class and Methods for deriving MCSS based MMP's
About: Derive a matched pair based MCSS from a pair molecules
To do: - extend the method enumerate_fragment_properties to also
enumerate self.mol_smi_dict as this would allow the addition
of a flag '-p' that prints out whole molecule props alongside
MCSS and therefore compute %molecule that the MCSS covers
- could use other descriptors from IW code to get MCSS via
bond count not #Atoms or
- Should move iterators in process_mcss_list_to_string to be numeric
and store numeric ID's in self.largest_mcs_mmp_double/single
- could allow further switched to change behaviour of tie break
where single/double or double alone give tie break MCSS
[connected substructures versus disconnected or both/either]
- Extension to triple cut would allow improved search/match e.g.:
N1(C(c2c(cc3c(c2)OCO3)CC1)c4cc(c(c(c4)OC)O)OC)C(=O)OC CHEMBL311765
N1(C(c2c(cc(cc2)O)CC1)c3ccc(cc3)OCCN4CCCC4)C(=O)OCC CHEMBL94080
"""
###################################################################
import logging
import csv
import os
import sys
import unittest
import tempfile
from builtins import range
from mmp.mmp_data_objects import MMPDataObjectClass
if 'LILLYMOL_HOME' in os.environ:
import pybase.pyopmo as pymo
else:
import pybase.pymo as pymo
class MMPbasedMCSSObjectClass(MMPDataObjectClass):
def __init__(self, logger_object):
"""
Example usage:
mmplogger = logging.getLogger('lillymol_file_logger')
logging.disable(logging.CRITICAL)
my_mmp_mcss_object = MMPbasedMCSSObjectClass(mmplogger)
"""
MMPDataObjectClass.__init__(self, logger_object)
self.logger = logger_object
if len(logging.Logger.manager.loggerDict) < 1:
# exit with system status 1 and custom error
sys.exit("Invalid or no logger object passed to MMPObjectClass. Please create \
and pass a logger and set to use logging.disable if you don't want logging")
# this is used for storing the largest MCS MMP for given pair
self.largest_mcs_mmp_result = {}
self.ref_smi_props = {}
def clean_out_data_mcss_obj(self):
"""Method to clean out all objects in class"""
self.clean_out_data()
self.mcs_mmp.clear()
def enumerate_fragment_properties(self):
"""Writes out the ref_smi_dict to disk, calculates natoms, returns data to self.ref_smi_props
Some complexities in method such as double cut fragments (iw_descr only calcs largest frag)"""
frag_smi_file = tempfile.NamedTemporaryFile(delete=False, suffix='.smi')
frag_smi_props_out = tempfile.NamedTemporaryFile(delete=False)
with open(frag_smi_file.name, "w") as f:
for item in self.refsmi_dict:
if isinstance(item, int):
# can't see an easy way to do this except string compare, [1H] causes iw_descr to crash out
if self.refsmi_dict[item] != '[1H]':
f.write(self.refsmi_dict[item]+" "+str(item)+"\n")
# run pymo.iwdescr
self.logger.info("Running pymo.iwdescr on %s smi with in:%s, out:%s" %
(len(self.refsmi_dict), frag_smi_file.name, frag_smi_props_out.name))
exit_status = pymo.iwdescr(frag_smi_file.name, frag_smi_props_out.name, params_dict={'-l': '', '-v': ''},
loggero=self.logger)
self.logger.debug("Ran iwdescr with exit status %s" % exit_status)
with open(frag_smi_props_out.name, "r") as csv_file:
reader = csv.reader(csv_file, delimiter=' ')
i = -1
for row in reader:
i += 1
# if header row, append headers
if i == 0:
if row[1] != 'w_natoms':
self.logger.warn("When this was written, NATOMs was in array position 1 (zero indexed) with "
"column title w_natoms. Now it's not, it's: %s" % row[1])
sys.exit("When this was written, NATOMs was in array position 1 (zero indexed) with column "
"title w_natom. Now it's not, it's: %s" % row[1])
continue
# we trust there is only one entry per id
# print row[0], row[1]
self.ref_smi_props[int(row[0])] = int(row[1])
frag_smi_props_out.close()
self.logger.debug("Completed load of %s mol props from dict of %s from file %s" %
(len(self.ref_smi_props), len(self.refsmi_dict)/2, frag_smi_props_out.name))
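    # Illustrative note (the ids and counts are assumptions): after this method runs,
    # self.ref_smi_props maps numeric fragment/context ids to their atom counts as read
    # from the w_natoms column of the iwdescr output, e.g. {12: 7, 345: 19}.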
def get_largest_mcs_pairs(self, out_file, cut_type, mdc_atm_soft=None, mdc_atm_soft_threshold=None,
mdc_atm_hard=None):
"""Method to print out a single smi - smi pair from the input CSV with data differences. Selection of the
exact matched pair for a given smi - smi combination is based on the largest Maximum Common Substructure
which equates to the MMP with the smallest MWT/#Atoms difference across all MMP's for that smi/smi combo
out_file:
The user specified output file
cut_type:
Specifies the type of fragmentation required. Allowed values are SINGLE,
DOUBLE or BOTH. Currently this class does not support anything greater than
double cut fragmentation
mdc_atm_hard:
max double cut atom cutoff (hard)
Never consider double cut context fragments where one half has num_atoms <= mdc_atm_hard
i.e.: this is a hard cutoff filter implemented during dicer parsing
mdc_atm_soft:
max double cut atom cutoff (soft)
* must be used with mdc_atm_soft_threshold
When double cut is greater than single, if one part of double context has num_atoms <= mdc_atm_soft and
            total double cut atoms <= single cut atoms + mdc_atm_soft_threshold then discard
mdc_atm_soft_threshold:
max double cut atom cutoff threshold (soft)
* must be used with mdc_atm_soft
This gets added to single cut num atoms each comparison that's done, if and when mdc_atm_soft is set
see details of mdc_atm_soft
Example usage:
# give me a CSV named my_output.pairs of all MCS based pairs:
            my_mmp_object.get_largest_mcs_pairs('myoutput.csv', 'BOTH')
            # give me a CSV of only the DOUBLE cut MCS based pairs:
            my_mmp_object.get_largest_mcs_pairs('myoutput.csv', 'DOUBLE')
"""
if (mdc_atm_soft is not None and mdc_atm_soft_threshold is None) or\
(mdc_atm_soft is None and mdc_atm_soft_threshold is not None):
sys.exit("Error, mdc_atm_soft and mdc_atm_soft_threshold must be specified together.")
def process_mcss_list_to_string(prefix, input_list):
"""sub method to build a printable string from input list of specific structure"""
out_string = ''
num_of_entries = len(input_list)
if num_of_entries > 4:
for i_ in range(0, num_of_entries, 4):
                    out_string = out_string + prefix + "_" + str((i_//4)+1) + "," + str(molid_L) + "," + str(molid_R)
out_string = out_string + "," + str(sum(input_list[0 + i_])) + "," + str(input_list[1 + i_]) + ","
out_string = out_string + str(input_list[2 + i_]) + "," + str(input_list[3 + i_])
out_string += "\n"
else:
if len(input_list[1]) > 1:
ctx_smi = self.refsmi_dict[input_list[1][0]] + "." + self.refsmi_dict[input_list[1][1]]
else:
ctx_smi = self.refsmi_dict[input_list[1][0]]
out_string = prefix + "," + str(molid_L) + "," + str(molid_R) + ","
out_string = out_string + str(sum(input_list[0])) + "," + ctx_smi + ","
out_string = out_string + str(self.refsmi_dict[input_list[2]]) + "," \
+ str(self.refsmi_dict[input_list[3]])
out_string += "\n"
return out_string
def disambiguate_double_list(input_list):
"""sub method to untangle double cut tie break cases"""
num_of_entries = len(input_list)
filtered_list = []
# The tie code should have only saved the example with the largest 'smallest fragment' size
# so now we just take the first example where atom numbering [1 before [2
# Theoretically, if two different examples of a double cut fragmentation pattern exist with the same number
# of atoms *in both parts* of the context, then there is another tie break here. e.g.:
# num_atoms in context = (2,10) should always appear not (1,11) but can't disentangle many (1,11)
# Decided not to handle this and instead just take the first one with the ordered numbering
for i_ in range(0, num_of_entries, 4):
# only use if the isomeric label is the right way round, [1 before [2
if '[1' in self.refsmi_dict[input_list[1 + i_][0]]:
filtered_list = input_list[(0 + i_): (4 + i_)]
else:
continue
return filtered_list
def remove_atom_num_dupes(input_list):
"""sub method to get only 1 example of simple isomeric numbering flip"""
# only use if the isomeric label is the right way round, [1 before [2
if '[1' in self.refsmi_dict[input_list[1][0]]:
# take the first 4 items
output_list = input_list[:4]
else:
# just take the last 4 items
output_list = input_list[-4:]
return output_list
self.logger.info('Opening output file for write: %s' % out_file)
# check cut_type, convert to int
if cut_type.upper() == 'DOUBLE':
# confusing but faster later
cut_type_id = 3
elif cut_type.upper() == 'BOTH':
# confusing but faster later
cut_type_id = 2
elif cut_type.upper() == 'SINGLE':
cut_type_id = 1
else:
self.logger.warn('cut_type specification is incorrect, using single cut: %s' % cut_type.upper())
cut_type_id = 1
# fail if both single_pairs_dict and double_pairs_dict are empty
if (len(self.single_pairs_dict) == 0) and (len(self.double_pairs_dict) == 0):
self.logger.debug('No data found in single_pairs_dict and/or double_pairs_dict, expect no results')
# sys.exit("Error: no data found in single_pairs_dict and/or double_pairs_dict, nothing to find and write")
#
# Here we build data structures of type:
# self.largest_mcs_mmp_result[(molid_L, molid_R)] = [(#atoms, #atoms or None),
# (context_id, context_id or None), frag_Left_id, frag_Right_id]
#
# single - this is easy as we only keep/store the one with the greatest number of atoms
if cut_type_id <= 2:
for molid_L, molid_R, ctx_id, frag_L_id, frag_R_id in \
self.iterator_single_pairs_dict_numeric(inc_attachpt=False):
if (molid_L, molid_R) in self.largest_mcs_mmp_result:
if self.largest_mcs_mmp_result[(molid_L, molid_R)][0][0] <= self.ref_smi_props[ctx_id]:
if self.largest_mcs_mmp_result[(molid_L, molid_R)][0][0] == self.ref_smi_props[ctx_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)].extend(
[(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id])
else:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [
(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id]
else:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [
(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id]
# now build the final results on the fly
# double - for each one we compare against what we already have in self.largest_mcs_mmp_result
ctx_natoms = None
if cut_type_id >= 2:
for molid_L, molid_R, ctx1_id, ctx2_id, frag_L_id, frag_R_id in \
self.iterator_double_pairs_dict_numeric(inc_attachpt=False):
#
if ctx1_id in self.ref_smi_props:
ctx_natoms = (self.ref_smi_props[ctx1_id], )
else:
ctx1_smi = self.refsmi_dict[ctx1_id]
ctx1_smi = ctx1_smi.replace("[1", "[9")
ctx1_smi = ctx1_smi.replace("[2", "[1")
ctx1_smi = ctx1_smi.replace("[9", "[2")
try:
ctx_natoms = (self.ref_smi_props[self.refsmi_dict[ctx1_smi]], )
except:
print("ERR >>>")
print(("{} {} {} {} {} {}".format(molid_L, molid_R, ctx1_id, ctx2_id, frag_L_id, frag_R_id)))
print(("{} {} {}".format(ctx1_id, ctx1_smi, self.refsmi_dict[ctx1_smi])))
print("")
if ctx2_id in self.ref_smi_props:
ctx_natoms = ctx_natoms + (self.ref_smi_props[ctx2_id], ) | ctx2_smi = ctx2_smi.replace("[2", "[1")
ctx2_smi = ctx2_smi.replace("[9", "[2")
ctx_natoms = ctx_natoms + (self.ref_smi_props[self.refsmi_dict[ctx2_smi]], )
# If the indicator flag check_all_context is set to true we need to pre-filter all ctx fragments
# to ensure they are greater than or equal to the specified limit for mdc_atm_hard (maximum double
# cut atoms hard limit). This is a crude filter and could remove valid double cut MCSS.
if mdc_atm_hard is not None:
if ctx_natoms[0] <= mdc_atm_hard:
continue
elif ctx_natoms[1] <= mdc_atm_hard:
continue
#
# Main
# have we seen this smi - smi pair before?
if (molid_L, molid_R) in self.largest_mcs_mmp_result:
# get the number of atoms in the context
num_atoms_existing = self.largest_mcs_mmp_result[(molid_L, molid_R)][0]
if len(num_atoms_existing) > 1:
total_num_atoms_existing = sum(num_atoms_existing)
else:
total_num_atoms_existing = num_atoms_existing[0]
total_num_atoms_new = sum(ctx_natoms)
if total_num_atoms_new > total_num_atoms_existing:
# if it is a double and we have a min fragment setting
if mdc_atm_soft is not None:
# if it falls below the threshold at which we apply this min frag setting
if total_num_atoms_new <= (total_num_atoms_existing + mdc_atm_soft_threshold):
# only keep if both frag sizes are legal
if '[1' in self.refsmi_dict[ctx1_id]:
if (ctx_natoms[0] > mdc_atm_soft) and (ctx_natoms[1] > mdc_atm_soft):
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
# above threshold so keep anyway
else:
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
else:
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
# tie-break
elif total_num_atoms_new == total_num_atoms_existing:
# single always wins over double, so only consider this if existing is double
# double cut tie breaks get disambiguated later using custom function
if len(num_atoms_existing) == 1:
continue
else:
# consider the size of the 'smallest fragment' and add if same, replace if bigger,
# drop if smaller
if min(ctx_natoms) > min(num_atoms_existing):
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
elif min(ctx_natoms) == min(num_atoms_existing):
self.largest_mcs_mmp_result[(molid_L, molid_R)].extend(
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id])
else:
# don't store as we have a better context with a larger 'smallest fragment'
continue
# double cut context must be smaller than what we already have so discard this new one
else:
continue
else:
# new result, case where we only have a double cut MCSS so add it!
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [ctx_natoms, (ctx1_id, ctx2_id),
frag_L_id, frag_R_id]
with open(out_file, "w") as final_out:
final_out.write('CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R\n')
# do single cut first as these take precedence above a double
for (molid_L, molid_R) in self.largest_mcs_mmp_result:
list_length = len(self.largest_mcs_mmp_result[(molid_L, molid_R)])
# the list self.largest_mcs_mmp_result[(molid_L, molid_R)] contains an ordered list of items
# the first 4 are (1) a tuple of the num_atoms (2) fragment (3&4) context in two parts
# Therefore if the list is greater than 8 items it means we have more than one double
# cut that we need to consider, possibly as a double cut tie break. We do not consider the
# case where there are 8 items as we know this will be two identical fragmentation patterns
# with differing isomeric numbering on the atom attachment points therefore we use >8 not >=8
if list_length > 8:
if len(self.largest_mcs_mmp_result[(molid_L, molid_R)][0]) == 1:
# disambiguate single cut list
final_out.write(process_mcss_list_to_string('SINGLE', self.largest_mcs_mmp_result[
(molid_L, molid_R)][0:4]))
else:
# print("Double won (a): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
new_list = disambiguate_double_list(self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('DOUBLE', new_list))
elif list_length == 4:
# print("Single won (a): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('SINGLE', self.largest_mcs_mmp_result[
(molid_L, molid_R)]))
else:
# print("Double wins (b): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
# need to remove atom numbering dupes then print
new_list = remove_atom_num_dupes(self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('DOUBLE', new_list))
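# Hedged end-to-end sketch (mirrors the unit tests below; the file names and the
# mdc_atm_hard value are assumptions):
#
#   mmplogger = logging.getLogger('lillymol_file_logger')
#   logging.disable(logging.CRITICAL)
#   mcss = MMPbasedMCSSObjectClass(mmplogger)
#   mcss.build_from_dicer('input.smi', 'BOTH', 'NONE')   # inherited from MMPDataObjectClass
#   mcss.enumerate_fragment_properties()
#   mcss.get_largest_mcs_pairs('output.csv', 'BOTH', mdc_atm_hard=4)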
class _TestMMPbasedMCSSObjectClass(unittest.TestCase):
"""Test class for MMPDataObjectClass(object) written to use pythons unittest
Example usage:
python mmp_mcss_objects.py
coverage run mmp_mcss_objects.py
coverage report mmp_mcss_objects.py
"""
def setUp(self):
"""Instantiate temp file names, test data objects that get written to temp files
a silent logger object (needed to instantiate class) and the mmp object we'll test"""
self.maxDiff = None
# setup test data location use tempfile.NamedTemporaryFile(delete=False) to persist data on disk
self.temp_file_input_smi_01 = tempfile.NamedTemporaryFile(delete=False, suffix=".smi",
encoding='utf-8', mode='wt')
self.temp_file_input_smi_03 = tempfile.NamedTemporaryFile(delete=False, suffix=".smi",
encoding='utf-8', mode='wt')
self.temp_file_output_pairs = tempfile.NamedTemporaryFile(delete=False)
# setup a logger object
self.mmplogger = logging.getLogger('mmpobjectclass_testlogger')
# logging.disable(logging.CRITICAL)
# create empty mmp object
self.test_mmp_mcss_object = MMPbasedMCSSObjectClass(self.mmplogger)
# data set for use in testing input
self.test_dataset_goldeninput_smi_01 = {
# The following represent synthetic data, analogues of CHEMBL1382609
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL1382609/
# 1. substituents are added to the pyrazole ring to generate side chain MMPs
# H on CHEMBL1382609 between two methyls is changed to Br, F, C, I to
# visually see the change in the smiles string (avoiding Cl as already present)
# e.g.: N1C(=C(Br)C(=N1)C)C
# 2. core ring system is modified (phenyl to pyridine) to see ring switch MMP's
# Presence/Absence of Pyridine-N and N-positional isomerism in Cl-Ph ring
# e.g.: C2=NC(=CS2)C2=CC=C(Cl)C=C2 + addition of N ->
# C2=NC(=CS2)C2=CN=C(Cl)C=C2 + move N around ring ->
# C2=NC(=CS2)C2=NC=C(Cl)C=C2
# for 1,2 single wins
'001': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(Br)C(=N1)C)C',
'002': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
# for 2,5 double wins tie
'003': 'N1(C2=NC(=CS2)C2=CN=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
# The following represent synthetic data, analogues of CHEMBL1341352
# for 1341352 and it's synthetic unsubstituted analogue there is no double
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL1341352/
'1341352': 'Cc1cc(nn1CC(=O)NCc2ccccc2)C(F)(F)F',
'004': 'c1cc(nn1CC(=O)NCc2ccccc2)',
# more double cut only
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL6211
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL6232
'6211': 'O=C(OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC)CCCCCCC',
'6232': 'O=C(N1C(CN(C(=O)c2cc(c(OC)c(c2)OC)OC)CC1)COC(=O)CC(C)(C)C)c1cc(c(OC)c(OC)c1)OC'
}
self.test_dataset_goldeninput_smi_03 = {
# repeat of above
'001': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(Br)C(=N1)C)C',
'002': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
}
# all smiles are output from above input as either a repeat smiles or a fragment of them
self.test_dataset_golden_output_01 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
'DOUBLE,2,3,14,[1ClH].Fc1c([n](c2sc[2cH][n]2)[n]c1C)C,[1cH]1cc[2cH]cc1,[n]1[1cH]cc[2cH]c1': None,
'DOUBLE,3,2,14,[1ClH].Fc1c([n](c2sc[2cH][n]2)[n]c1C)C,[n]1[1cH]cc[2cH]c1,[1cH]1cc[2cH]cc1': None,
'SINGLE,1341352,4,11,O=C(NCc1ccccc1)[1CH3],Cc1[1nH][n]c(C(F)(F)F)c1,[1nH]1[n]ccc1': None,
'SINGLE,4,1341352,11,O=C(NCc1ccccc1)[1CH3],[1nH]1[n]ccc1,Cc1[1nH][n]c(C(F)(F)F)c1': None,
'DOUBLE,6211,6232,40,[1CH4].[2CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,[1CH3]CCC[2CH3],C[12CH2]C': None,
'DOUBLE,6232,6211,40,[1CH4].[2CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,C[12CH2]C,[1CH3]CCC[2CH3]': None}
self.test_dataset_golden_output_02 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
'SINGLE,2,3,13,Fc1c([n](c2sc[1cH][n]2)[n]c1C)C,Clc1cc[1cH]cc1,Clc1[n]c[1cH]cc1': None,
'SINGLE,3,2,13,Fc1c([n](c2sc[1cH][n]2)[n]c1C)C,Clc1[n]c[1cH]cc1,Clc1cc[1cH]cc1': None,
'SINGLE,1341352,4,11,O=C(NCc1ccccc1)[1CH3],Cc1[1nH][n]c(C(F)(F)F)c1,[1nH]1[n]ccc1': None,
'SINGLE,4,1341352,11,O=C(NCc1ccccc1)[1CH3],[1nH]1[n]ccc1,Cc1[1nH][n]c(C(F)(F)F)c1': None,
'SINGLE,6211,6232,39,[1CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,[1CH3]CCCCC,C[1CH](C)C': None,
'SINGLE,6232,6211,39,[1CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,C[1CH](C)C,[1CH3]CCCCC': None}
self.test_dataset_golden_output_03 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
'DOUBLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'DOUBLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None}
# write test data to temp file (smi)
for smi_id, smi in list(self.test_dataset_goldeninput_smi_01.items()):
self.temp_file_input_smi_01.write(smi + " " + smi_id + "\n")
self.temp_file_input_smi_01.close()
# write test data to temp file (smi)
for smi_id, smi in list(self.test_dataset_goldeninput_smi_03.items()):
self.temp_file_input_smi_03.write(smi + " " + smi_id + "\n")
self.temp_file_input_smi_03.close()
# container for results data
self.test_dataset_testresults = {}
def tearDown(self):
"""Tear down object for clean reuse in further tests"""
# clean out the object
self.test_mmp_mcss_object.clean_out_data()
# clean out the temp data store
self.test_dataset_testresults.clear()
os.remove(self.temp_file_input_smi_01.name)
def test_get_largest_mcs_pairs_with_diff(self):
"""Test method to get largest MCS MMP for given smi - smi pair"""
# 6. full build then write of pairs to file, but only for a single named column
self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_01.name, 'BOTH', 'NONE')
self.test_mmp_mcss_object.enumerate_fragment_properties()
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH')
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
#print(self.test_dataset_testresults)
self.assertEqual(self.test_dataset_golden_output_01, self.test_dataset_testresults)
def test_get_largest_mcs_pairs_mdc_atm_hard(self):
"""Test method to get largest MCS MMP for given smi - smi pair"""
# 6. full build then write of pairs to file, but only for a single named column
self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_01.name, 'BOTH', 'NONE')
self.test_mmp_mcss_object.enumerate_fragment_properties()
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH', mdc_atm_hard=4)
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
#print(self.test_dataset_testresults)
self.assertEqual(self.test_dataset_golden_output_02, self.test_dataset_testresults)
def test_get_largest_mcs_pairs_mdc_atm_soft(self):
"""Test method to get largest MCS MMP for given smi - smi pair"""
# 6. full build then write of pairs to file, but only for a single named column
self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_03.name, 'BOTH', 'NONE')
self.test_mmp_mcss_object.enumerate_fragment_properties()
#
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH')
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH', mdc_atm_soft=3,
mdc_atm_soft_threshold=4)
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
#print(self.test_dataset_testresults)
self.assertEqual(self.test_dataset_golden_output_03, self.test_dataset_testresults)
if __name__ == '__main__':
unittest.main() | else:
ctx2_smi = self.refsmi_dict[ctx2_id]
ctx2_smi = ctx2_smi.replace("[1", "[9") |
add_test.py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleAddModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
return (a * a).add(b.item())
if self.inplace:
c = a.add_(b)
return c.add_(c)
else:
c = a.add(b)
return c.add(c)
class TestAdd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleAddModule(), torch.randn(4), torch.randn(4)),
lambda: ("inplace", SimpleAddModule(True), torch.randn(4), torch.randn(4)), | SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),
lambda: (
"float_and_int",
SimpleAddModule(),
torch.randn(4),
torch.tensor(42),
True,
),
lambda: (
"int32",
SimpleAddModule(),
torch.randint(-10, 10, (2, 4), dtype=torch.int32),
torch.randint(-10, 10, (2, 4), dtype=torch.int32),
),
lambda: (
"int64",
SimpleAddModule(),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_add(self, _, module, a, b, skip_to_glow=False):
utils.run_comparison_tests(
module,
(a, b),
fusible_ops={"aten::add_"} if module.inplace else {"aten::add"},
) | lambda: (
"broadcast", |
wsgi.py | """
WSGI config for core project. |
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = Cling(get_wsgi_application()) |
It exposes the WSGI callable as a module-level variable named ``application``. |
run_csharp.rs | use failure::{format_err, Fallible};
use std::path::Path;
use std::process::ExitStatus;
use super::{cmd, util};
pub fn | (files: Vec<&str>, stdin: &str) -> Fallible<ExitStatus> {
let work_dir = util::dirname(files[0])?;
let bin_file = "main_cs.exe";
let mut source_files = util::filter_by_extension(&files, ".cs");
let bin_arg = format!("-out:{}", bin_file);
let mut args = vec!["mcs", bin_arg.as_str()];
args.append(&mut source_files);
let status: ExitStatus = cmd::run(work_dir, args)?;
if !status.success() {
return Ok(status);
}
let bin_path_buf = Path::new(work_dir).join(bin_file);
let bin_path = bin_path_buf
.to_str()
.ok_or(format_err!("invalid bin_path"))?;
cmd::run_stdin(work_dir, vec!["mono", bin_path], stdin)
}
| run |
__main__.py | #!/usr/bin/env python3
"""
Main module for the deployable project.
"""
# Bootstrap to be able to perform absolute imports as standalone code
if __name__ == "__main__":
from absolute_import import absolute_import
absolute_import(file=__file__, name=__name__, path=__path__)
# Normal imports
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from deployable.defaults.args import description, epilog
from typing import Any, Tuple
def get_args() -> Tuple[Any]:
"""
Retrieves arguments from command line.
"""
# Create parser and groups
parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter)
def main() -> None:
|
# Call main method
if __name__ == "__main__":
main()
| """
Entrypoint.
""" |
censor.py | from typing import *
import re
class Censorship:
def __init__(self, content: Union[Any, str, None] = None) -> None:
self.content: str = content
def update_content(self, content: Any):
self.content = content
def censor(self):
| censored = ["fuck", "shit", "lmao", "lmfao", "porn", "sex", "cock", "ball"]
for censor in censored:
if censor in self.content:
lenned = len(censor)
hashes = "#" * lenned
self.content = self.content.replace(censor, hashes)
self.content = re.sub(
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
"[url omitted]",
self.content,
)
return self.content |
|
median_average_test.go | package statistics
import (
"testing"
"github.com/hauson/lib/testsuit"
)
func TestMedianAvg(t *testing.T) {
testsuit.TestSuit{
{
Desc: "normal",
Args: []float64{1.0, 3.0, 8.0, 2.0, 4.0},
WantResults: 3.0,
}, | Desc: "null",
Args: []float64{},
WantErr: "numbers is empty",
},
}.Range(t, func(c *testsuit.TestCase) (interface{}, error) {
numbers := c.Args.([]float64)
return MedianAvg(numbers)
})
} | { |
build.py | import os
import copy
import re
import dill
import subprocess
from datetime import datetime
from collections import OrderedDict as odict
from .generator import Generator
from . import util, cmake, vsinfo
from .named_item import NamedItem
from .variant import Variant
from .build_flags import BuildFlags
from .compiler import Compiler
from .architecture import Architecture
from . import err
from .util import logdbg as dbg
# experimental. I don't think it will stay unless conan starts accepting args
from .conan import Conan
# -----------------------------------------------------------------------------
class Build(NamedItem):
"""Holds a build's settings"""
pfile = "cmany_preload.cmake"
sfile = "cmany_build.dill"
def __init__(self, proj_root, build_root, install_root,
system, arch, build_type, compiler, variant, flags,
num_jobs, kwargs):
#
self.kwargs = kwargs
self.export_compile = self.kwargs.get('export_compile', True)
#
self.projdir = util.chkf(proj_root)
self.buildroot = util.abspath(build_root)
self.installroot = util.abspath(install_root)
#
self.flags = flags
self.system = system
self.architecture = arch
self.build_type = build_type
self.compiler = compiler
self.variant = variant
#
self.adjusted = False
#
if util.in_64bit and self.architecture.is32:
if self.compiler.gcclike:
dbg("making 32 bit")
self.compiler.make_32bit()
elif util.in_32bit and self.architecture.is64:
if self.compiler.gcclike:
dbg("making 64 bit")
self.compiler.make_64bit()
#
tag = self._set_name_and_paths()
super().__init__(tag)
#
self.toolchain_file = self._get_toolchain()
if self.toolchain_file:
comps = cmake.extract_toolchain_compilers(self.toolchain_file)
c = Compiler(comps['CMAKE_CXX_COMPILER'])
self.adjust(compiler=c)
#
# WATCHOUT: this may trigger a readjustment of this build's parameters
self.generator = self.create_generator(num_jobs)
#
# This will load the vars from the builddir cache, if it exists.
# It should be done only after creating the generator.
self.varcache = cmake.CMakeCache(self.builddir)
# ... and this will overwrite (in memory) the vars with the input
# arguments. This will make the cache dirty and so we know when it
# needs to be committed back to CMakeCache.txt
self.gather_input_cache_vars()
#
self.deps = kwargs.get('deps', '')
if self.deps and not os.path.isabs(self.deps):
self.deps = os.path.abspath(self.deps)
self.deps_prefix = kwargs.get('deps_prefix')
if self.deps_prefix and not os.path.isabs(self.deps_prefix):
self.deps_prefix = os.path.abspath(self.deps_prefix)
if not self.deps_prefix:
self.deps_prefix = self.builddir
def _set_name_and_paths(self):
self.tag = __class__.get_tag(
self.system, self.architecture,
self.compiler, self.build_type, self.variant, '-')
self.buildtag = self.tag
self.installtag = self.tag # this was different in the past and may become so in the future
self.builddir = os.path.join(self.buildroot, self.buildtag)
self.installdir = os.path.join(self.installroot, self.installtag)
self.preload_file = os.path.join(self.builddir, Build.pfile)
self.cachefile = os.path.join(self.builddir, 'CMakeCache.txt')
for prop in "projdir buildroot installroot buildtag installtag builddir installdir preload_file cachefile".split(" "):
dbg(" {}: {}={}".format(self.tag, prop, getattr(self, prop)))
return self.tag
def create_generator(self, num_jobs, fallback_generator="Unix Makefiles"):
"""create a generator, adjusting the build parameters if necessary"""
#if self.toolchain_file is not None:
# toolchain_cache = cmake.get_toolchain_cache(self.toolchain_file)
# print(toolchain_cache)
# self.adjust(compiler=toolchain_cache['CMAKE_CXX_COMPILER'])
if self.compiler.is_msvc:
vsi = vsinfo.VisualStudioInfo(self.compiler.name)
g = Generator(vsi.gen, self, num_jobs)
arch = Architecture(vsi.architecture)
self.adjust(architecture=arch)
self.vsinfo = vsi
return g
else:
if self.system.name == "windows":
return Generator(fallback_generator, self, num_jobs)
else:
return Generator(Generator.default_str(), self, num_jobs)
def adjust(self, **kwargs):
for k, _ in kwargs.items():
supported = ('architecture', 'compiler')
if k not in supported:
raise err.NoSupport(f"build adjustment for {k}. Must be one of {supported}")
a = kwargs.get('architecture')
if a and a != self.architecture:
dbg(self, "adjusting architecture:", self.architecture, "---->", a)
self.adjusted = True
self.architecture = a
c = kwargs.get('compiler')
if c and c != self.compiler:
dbg(self, "adjusting compiler:", self.compiler, "---->", a)
self.adjusted = True
self.compiler = c
self._set_name_and_paths()
@staticmethod
def get_tag(s, a, c, t, v, sep='-'):
# some utilities (e.g., ar) don't deal well with + in the path
# so replace + with x
# eg see https://sourceforge.net/p/mingw/bugs/1429/
sc = __class__.sanitize_compiler_name(c)
s = str(s) + sep + str(a) + sep + sc + sep + str(t)
if v is not None and isinstance(v, Variant):
v = v.name
if v and v != "none":
s += "{sep}{var}".format(sep=sep, var=str(v))
return s
@staticmethod
def sanitize_compiler_name(c):
sc = re.sub(r'\+', 'x', str(c))
return sc
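# Illustration only (hypothetical values, not taken from this project's config):
# get_tag("linux", "x86_64", "g++", "Release", None) would yield
# "linux-x86_64-gxx-Release" -- sanitize_compiler_name() turns "g++" into "gxx",
# and the variant suffix is omitted because the variant is None.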
def create_dir(self):
if not os.path.exists(self.builddir):
os.makedirs(self.builddir)
def _serialize(self):
# https://stackoverflow.com/questions/4529815/saving-an-object-data-persistence
protocol = 0 # serialize in ASCII
fn = os.path.join(self.builddir, __class__.sfile)
with open(fn, 'wb') as f:
dill.dump(self, f, protocol)
@staticmethod
def deserialize(builddir):
# https://stackoverflow.com/questions/4529815/saving-an-object-data-persistence
if not os.path.exists(builddir):
raise err.BuildDirNotFound(builddir)
fn = os.path.join(builddir, __class__.sfile)
if not os.path.exists(fn):
raise err.BuildSerializationNotFound(fn, builddir)
with open(fn, 'rb') as f:
return dill.load(f)
def configure_cmd(self, for_json=False):
if for_json:
return ('-C ' + self.preload_file
+ ' ' + self.generator.configure_args(for_json=for_json))
cmd = (['cmake', '-C', self.preload_file]
+ self.generator.configure_args(export_compile_commands=self.export_compile))
if self.toolchain_file:
cmd.append('-DCMAKE_TOOLCHAIN_FILE=' + self.toolchain_file)
cmd.append(self.projdir)
return cmd
def configure(self):
self.create_dir()
self.create_preload_file()
self.handle_deps()
if self.needs_cache_regeneration():
self.varcache.commit(self.builddir)
with util.setcwd(self.builddir, silent=False):
cmd = self.configure_cmd()
try:
util.runsyscmd(cmd)
self.mark_configure_done(cmd)
except Exception as e:
raise err.ConfigureFailed(self, cmd, e)
if self.export_compile:
if not self.generator.exports_compile_commands:
util.logwarn("WARNING: this generator cannot export compile commands. Use 'cmany export_compile_commands/xcc to export the compile commands.'")
def export_compile_commands(self):
# some generators (notably VS/msbuild) cannot export compile
# commands, so to get that, we'll configure a second build using the
# ninja generator so that compile_commands.json is generated;
# finally, copy over that file to this build directory
if self.needs_configure():
self.configure()
trickdir = os.path.join(self.builddir, '.export_compile_commands')
if not os.path.exists(trickdir):
os.makedirs(trickdir)
with util.setcwd(trickdir, silent=False):
cmd = ['cmake', '-G', 'Ninja', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON', '-C', self.preload_file, self.projdir]
try:
if not self.compiler.is_msvc:
util.runsyscmd(cmd)
else:
self.vsinfo.runsyscmd(cmd)
except Exception as e:
raise err.ConfigureFailed(self, cmd, e)
src = os.path.join(trickdir, "compile_commands.json")
dst = os.path.join(self.builddir, "compile_commands.json")
if os.path.exists(src):
from shutil import copyfile
if os.path.exists(dst):
os.remove(dst)
copyfile(src, dst)
util.loginfo("exported compile_commands.json:", dst)
def run_custom_cmd(self, cmd, **subprocess_args):
try:
util.runcmd(cmd, **subprocess_args, cwd=self.builddir)
except subprocess.CalledProcessError as exc:
raise err.RunCmdFailed(self, cmd, exc)
def reconfigure(self):
"""reconfigure a build directory, without touching any cache entry"""
self._check_successful_configure('reconfigure')
with util.setcwd(self.builddir, silent=False):
cmd = ['cmake', self.projdir]
try:
util.runsyscmd(cmd)
except Exception as e:
raise err.ConfigureFailed(self, cmd, e)
def _check_successful_configure(self, purpose):
if not os.path.exists(self.builddir):
raise err.BuildDirNotFound(self.builddir, purpose)
if not os.path.exists(self.varcache.cache_file):
raise err.CacheFileNotFound(self.varcache.cache_file, self.builddir, purpose)
pkf = os.path.join(self.builddir, __class__.sfile)
if not os.path.exists(pkf):
raise err.BuildSerializationNotFound(pkf, self.builddir)
def mark_configure_done(self, cmd):
self._serialize()
with util.setcwd(self.builddir):
with open("cmany_configure.done", "w") as f:
f.write(" ".join(cmd) + "\n")
def needs_configure(self):
if not os.path.exists(self.builddir):
return True
with util.setcwd(self.builddir):
if not os.path.exists("cmany_configure.done"):
return True
if self.needs_cache_regeneration():
return True
return False
def needs_cache_regeneration(self):
if os.path.exists(self.cachefile) and self.varcache.dirty:
return True
return False
def build(self, targets=[]):
self.create_dir()
with util.setcwd(self.builddir, silent=False):
if self.needs_configure():
self.configure()
self.handle_deps()
if len(targets) == 0:
if self.compiler.is_msvc:
targets = ["ALL_BUILD"]
else:
targets = ["all"]
# cmake --build and visual studio won't handle
# multiple targets at once, so loop over them.
for t in targets:
try:
cmd = self.generator.cmd([t])
util.runsyscmd(cmd)
except Exception as e:
raise err.CompileFailed(self, cmd, e)
# this was written before using the loop above.
# it can come to fail in some corner cases.
self.mark_build_done(cmd)
def rebuild(self, targets=[]):
self._check_successful_configure('rebuild')
with util.setcwd(self.builddir, silent=False):
if len(targets) == 0:
if self.compiler.is_msvc:
targets = ["ALL_BUILD"]
else:
targets = ["all"]
# cmake --build and visual studio won't handle
# multiple targets at once, so loop over them.
for t in targets:
cmd = self.generator.cmd([t])
try:
util.runsyscmd(cmd)
except Exception as e:
raise err.CompileFailed(self, cmd, e)
def mark_build_done(self, cmd):
with util.setcwd(self.builddir):
with open("cmany_build.done", "w") as f:
f.write(" ".join(cmd) + "\n")
def needs_build(self):
if not os.path.exists(self.builddir):
return True
with util.setcwd(self.builddir):
if not os.path.exists("cmany_build.done"):
return True
if self.needs_cache_regeneration():
return True
return False
def install(self):
self.create_dir()
with util.setcwd(self.builddir, silent=False):
if self.needs_build():
self.build()
cmd = self.generator.install()
try:
util.runsyscmd(cmd)
except Exception as e:
raise err.InstallFailed(self, cmd, e)
def reinstall(self):
self._check_successful_configure('reinstall')
with util.setcwd(self.builddir, silent=False):
if self.needs_build():
self.build()
cmd = self.generator.install()
try:
util.runsyscmd(cmd)
except Exception as e:
raise err.InstallFailed(self, cmd, e)
def clean(self):
self.create_dir()
with util.setcwd(self.builddir):
cmd = self.generator.cmd(['clean'])
util.runsyscmd(cmd)
os.remove("cmany_build.done")
def _get_flagseq(self):
return (
self.flags,
self.system.flags,
self.architecture.flags,
self.compiler.flags,
self.build_type.flags,
self.variant.flags
)
def _get_toolchain(self):
tc = None
for fs in self._get_flagseq():
tc = BuildFlags.merge_toolchains(tc, fs.toolchain)
if not tc:
return None
if not os.path.isabs(tc):
tc = os.path.join(os.getcwd(), tc)
tc = os.path.abspath(tc)
if not os.path.exists(tc):
raise err.ToolchainFileNotFound(tc)
return tc
def _gather_flags(self, which, append_to_sysinfo_var=None, with_defines=False):
flags = []
if append_to_sysinfo_var:
try:
flags = [cmake.CMakeSysInfo.var(append_to_sysinfo_var, self.generator)]
except RuntimeError:
pass
# append overall build flags
# append variant flags
flagseq = self._get_flagseq()
for fs in flagseq:
wf = getattr(fs, which)
for f in wf:
if isinstance(f, str):
r = f
elif isinstance(f, CFlag):
r = f.get(self.compiler)
flags.append(r)
if with_defines:
flags += fs.defines
# we're done
return flags
def _gather_cmake_vars(self):
flagseq = self._get_flagseq()
for fs in flagseq:
for v in fs.cmake_vars:
spl = v.split('=')
vval = ''.join(spl[1:]) if len(spl) > 1 else ''
nspl = spl[0].split(':')
if len(nspl) == 1:
self.varcache.setvar(nspl[0], vval, from_input=True)
elif len(nspl) == 2:
self.varcache.setvar(nspl[0], vval, nspl[1], from_input=True)
else:
raise err.Error('could not parse variable specification: {}', v)
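# Illustration only (hypothetical specs, not taken from this project): a var
# spec "FOO:BOOL=ON" is split into name "FOO", type "BOOL" and value "ON" and
# stored via self.varcache.setvar("FOO", "ON", "BOOL", from_input=True); an
# untyped spec "BAR=1" is stored as
# self.varcache.setvar("BAR", "1", from_input=True).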
def gather_input_cache_vars(self):
self._gather_cmake_vars()
vc = self.varcache
#
def _set(pfn, pname, pval): pfn(pname, pval, from_input=True)
if (not self.generator.is_msvc) and (not self.toolchain_file): | _set(vc.f, 'CMAKE_C_COMPILER', self.compiler.c_compiler)
_set(vc.f, 'CMAKE_CXX_COMPILER', self.compiler.path)
_set(vc.s, 'CMAKE_BUILD_TYPE', str(self.build_type))
_set(vc.p, 'CMAKE_INSTALL_PREFIX', self.installdir)
#
cflags = self._gather_flags('cflags', 'CMAKE_C_FLAGS_INIT', with_defines=True)
if cflags:
_set(vc.s, 'CMAKE_C_FLAGS', ' '.join(cflags))
#
cxxflags = self._gather_flags('cxxflags', 'CMAKE_CXX_FLAGS_INIT', with_defines=True)
if cxxflags:
_set(vc.s, 'CMAKE_CXX_FLAGS', ' '.join(cxxflags))
#
# if self.flags.include_dirs:
# _set(vc.s, 'CMANY_INCLUDE_DIRECTORIES', ';'.join(self.flags.include_dirs))
#
# if self.flags.link_dirs:
# _set(vc.s, 'CMAKE_LINK_DIRECTORIES', ';'.join(self.flags.link_dirs))
#
def create_preload_file(self):
# http://stackoverflow.com/questions/17597673/cmake-preload-script-for-cache
self.create_dir()
lines = []
s = '_cmany_set({} "{}" {})'
for _, v in self.varcache.items():
if v.from_input:
lines.append(s.format(v.name, v.val, v.vartype))
if lines:
tpl = _preload_file_tpl
else:
tpl = _preload_file_tpl_empty
now = datetime.now().strftime("%Y/%m/%d %H:%m")
txt = tpl.format(date=now, vars="\n".join(lines))
with open(self.preload_file, "w") as f:
f.write(txt)
return self.preload_file
@property
def deps_done(self):
dmark = os.path.join(self.builddir, "cmany_deps.done")
exists = os.path.exists(dmark)
return exists
def mark_deps_done(self):
with util.setcwd(self.builddir):
with open("cmany_deps.done", "w") as f:
s = ''
if self.deps:
s += self.deps + '\n'
if self.deps_prefix:
s += self.deps_prefix + '\n'
f.write(s)
def handle_deps(self):
if self.deps_done:
return
if not self.deps:
self.handle_conan()
self.mark_deps_done()
return
util.lognotice(self.tag + ': building dependencies', self.deps)
dup = copy.copy(self)
dup.builddir = os.path.join(self.builddir, 'cmany_deps-build')
dup.installdir = self.deps_prefix
util.logwarn('installdir:', dup.installdir)
dup.projdir = self.deps
dup.preload_file = os.path.join(self.builddir, self.preload_file)
dup.deps = None
dup.generator.build = dup
dup.configure()
dup.build()
try:
# if the dependencies cmake project is purely consisted of
# external projects, there won't be an install target.
dup.install()
except Exception as e:
util.logwarn(self.name + ": could not install. Maybe there's no install target?")
util.logdone(self.name + ': finished building dependencies. Install dir=', self.installdir)
self.varcache.p('CMAKE_PREFIX_PATH', self.installdir)
self.mark_deps_done()
def handle_conan(self):
if not self.kwargs.get('with_conan'):
return
doit = False
f = None
for fn in ('conanfile.py', 'conanfile.txt'):
f = os.path.join(self.projdir, fn)
cf = os.path.join(self.builddir, 'conanbuildinfo.cmake')
if os.path.exists(f) and not os.path.exists(cf):
doit = True
break
if not doit:
return
util.logdone('found conan file')
c = Conan()
c.install(self)
def json_data(self):
"""
https://blogs.msdn.microsoft.com/vcblog/2016/11/16/cmake-support-in-visual-studio-the-visual-studio-2017-rc-update/
https://blogs.msdn.microsoft.com/vcblog/2016/12/20/cmake-support-in-visual-studio-2017-whats-new-in-the-rc-update/
"""
builddir = self.builddir.replace(self.projdir, '${projectDir}')
builddir = re.sub(r'\\', r'/', builddir)
return odict([
('name', self.tag),
('generator', self.generator.name),
('configurationType', self.build_type.name),
('buildRoot', builddir),
('cmakeCommandArgs', self.configure_cmd(for_json=True)),
# ('variables', []), # this is not needed since the vars are set in the preload file
])
def get_targets(self):
with util.setcwd(self.builddir):
if self.generator.is_msvc:
# each target in MSVC has a corresponding vcxproj file
files = list(util.find_files_with_ext(self.builddir, ".vcxproj"))
files = [os.path.basename(f) for f in files]
files = [os.path.splitext(f)[0] for f in files]
return files
elif self.generator.is_makefile:
output = util.runsyscmd(["make", "help"], echo_cmd=False,
echo_output=False, capture_output=True)
output = output.split("\n")
output = output[1:] # The following are some of the valid targets....
output = [o[4:] for o in output] # take off the initial "... "
output = [re.sub(r'(.*)\ \(the default if no target.*\)', r'\1', o) for o in output]
output = sorted(output)
result = []
for o in output:
if o:
result.append(o)
return result
else:
util.logerr("sorry, feature not implemented for this generator: " +
str(self.generator))
def show_properties(self):
util.logcmd(self.name)
def p(n, v): print("{}={}".format(n, v))
if self.toolchain_file:
p('CMAKE_TOOLCHAIN_FILE', self.toolchain_file)
p('CMAKE_C_COMPILER', self.compiler.c_compiler)
p('CMAKE_CXX_COMPILER', self.compiler.path)
dont_show = ('CMAKE_INSTALL_PREFIX', 'CMAKE_CXX_COMPILER', 'CMAKE_C_COMPILER')
for _, v in self.varcache.items():
if v.from_input:
if v.name in dont_show:
continue
p(v.name, v.val)
p("PROJECT_BINARY_DIR", self.builddir)
p("CMAKE_INSTALL_PREFIX", self.installdir)
# -----------------------------------------------------------------------------
_preload_file_tpl = ("""\
# Do not edit. Will be overwritten.
# Generated by cmany on {date}
if(NOT _cmany_set_def)
set(_cmany_set_def ON)
function(_cmany_set var value type)
set(${{var}} "${{value}}" CACHE ${{type}} "")
message(STATUS "cmany: ${{var}}=${{value}}")
endfunction(_cmany_set)
endif(NOT _cmany_set_def)
message(STATUS "cmany:preload----------------------")
{vars}
message(STATUS "cmany:preload----------------------")
# if(CMANY_INCLUDE_DIRECTORIES)
# include_directories(${{CMANY_INCLUDE_DIRECTORIES}})
# endif()
#
# if(CMANY_LINK_DIRECTORIES)
# link_directories(${{CMANY_LINK_DIRECTORIES}})
# endif()
# Do not edit. Will be overwritten.
# Generated by cmany on {date}
""")
# -----------------------------------------------------------------------------
_preload_file_tpl_empty = ("""\
# Do not edit. Will be overwritten.
# Generated by cmany on {date}
message(STATUS "cmany: nothing to preload...")
""") | |
qrcode.ts | import { Gadget } from './gadget';
export class Qrcode extends Gadget { | //public value: string;
constructor() {
super('qrcode', 'Leitor de qrcode');
//this.value = (value ? value : '');
}
get isAnswered(): boolean {
return true;
}
get data() {
return {
};
}
} | |
responses.py | import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
# id: Unique key to identify data. Used as CSV column header and any portion before __ is used to create a
# sub-dictionary for JSON data.
id: str
description: str # Description for data dictionary
extractor: Callable[
[Union[Response, Dict]], Union[str, List]
] # Function to extract value from response instance or dict
optional: bool = False # is a column the user checks a box to include?
name: str = "" # used in template form for optional columns
include_by_default: bool = False # whether to initially check checkbox for field
identifiable: bool = False # used to determine filename signaling
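# Illustration of the id convention described above (column id shown is one of
# the real ids defined below): "response__uuid" is used verbatim as the CSV
# column header, while JSON output nests it under the portion before "__",
# producing e.g. {"response": {"uuid": "..."}}
# (see construct_response_dictionary further down).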
# Columns for response downloads. Extractor functions expect Response instance
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
col.id
for col in RESPONSE_COLUMNS
if col.id.startswith("child__") or col.id.startswith("participant__")
]
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
selected_header_ids: Union[Set, List],
all_available_header_ids: Union[Set, KeysView],
) -> List:
"""Get ordered list of response headers for download.
Select and order the appropriate headers to include in a file download, based on
which optional headers are selected and which headers are available.
Args:
selected_header_ids: which optional headers to include (corresponding to id values in
RESPONSE_COLUMNS). Headers that are specified as optional in RESPONSE_COLUMNS will
only be included if listed in selected_header_ids.
all_available_header_ids: all header ids we have data for. Any header ids that are in
this set but not in RESPONSE_COLUMNS will be added to the end of the output list.
Returns:
List of headers to include, consisting of the following in order:
1) Headers in RESPONSE_COLUMNS, in order, omitting any that are optional and were not selected
2) Extra headers from all_available_header_ids not included in (1), in alpha order
"""
unselected_optional_ids = {
col.id
for col in RESPONSE_COLUMNS
if col.optional and col.id not in selected_header_ids
}
selected_standard_header_ids = [
col.id
for col in RESPONSE_COLUMNS[0:-2]
if col.id not in unselected_optional_ids
]
return selected_standard_header_ids + sorted(
list(
all_available_header_ids
- set(selected_standard_header_ids)
- unselected_optional_ids
)
)
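# Illustration only (hypothetical selection; available_ids stands for whatever
# header ids the data actually contains): get_response_headers({"child__gender"},
# available_ids) keeps the optional "child__gender" column, drops the other
# optional columns (e.g. "child__birthday"), and appends any id present in
# available_ids but not defined in RESPONSE_COLUMNS at the end in alphabetical
# order.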
def get_demographic_headers(selected_header_ids=None) -> List[str]:
"""Get ordered list of demographic headers for download.
Args:
selected_header_ids(set or list): which optional headers to include (corresponding
to id values in DEMOGRAPHIC_COLUMNS).
Returns:
Ordered list of headers to include in download
Headers are id values from DEMOGRAPHIC_COLUMNS in order, omitting any that are optional
and were not included in selected_header_ids.
"""
if selected_header_ids is None:
selected_header_ids = {}
return [
col.id
for col in DEMOGRAPHIC_COLUMNS
if col.id in selected_header_ids or not col.optional
]
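# Illustration only: get_demographic_headers() with no selection returns every
# non-optional id in DEMOGRAPHIC_COLUMNS order, omitting the optional
# "participant__global_id"; passing {"participant__global_id"} includes it as well.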
def construct_response_dictionary(
resp, columns, optional_headers, include_exp_data=True
):
if optional_headers is None:
optional_headers = {}
resp_dict = {}
for col in columns:
if col.id in optional_headers or not col.optional:
try:
object_name, field_name = col.id.split("__")
if object_name in resp_dict:
resp_dict[object_name][field_name] = col.extractor(resp)
else:
resp_dict[object_name] = {field_name: col.extractor(resp)}
except ValueError:
resp_dict[col.id] = col.extractor(resp)
# Include exp_data field in dictionary?
if include_exp_data:
resp_dict["exp_data"] = resp.exp_data
return resp_dict
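# Illustration only (shape sketch, values elided): with default options the
# result looks like
#   {"response": {"id": ..., "uuid": ...}, "consent": {"ruling": ...}, ...,
#    "exp_data": {...}}
# because each column id is split once on "__" into (object_name, field_name).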
class FrameDataRow(NamedTuple):
response_uuid: str
child_hashed_id: str
frame_id: str
event_number: str
key: str
value: str
FRAME_DATA_HEADER_DESCRIPTIONS = {
"response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
"child_hashed_id": (
"Hashed identifier for the child associated with this response; can be matched to summary data "
"child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
"need to match children across multiple studies, use the child_global_id."
),
"frame_id": (
"Identifier for the particular frame responsible for this data; matches up to an element in the "
"response_sequence in the summary data file"
),
"event_number": (
"Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
"frame (and within global data) within each response. Blank for non-event data."
),
"key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
"value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
"""Get list of data stored in response's exp_data and global_event_timings fields.
Args:
resp(Response or dict): response data to process. If dict, must contain fields
child__uuid, study__uuid, study__salt, study__hash_digits, uuid, exp_data, and
global_event_timings.
Returns:
List of FrameDataRows each representing a single piece of data from global_event_timings or
exp_data. Descriptions of each field of the FrameDataRow are given in FRAME_DATA_HEADER_DESCRIPTIONS.
"""
if type(resp) is not dict:
resp = {
"child__uuid": resp.child.uuid,
"study__uuid": resp.study.uuid,
"study__salt": resp.study.salt,
"study__hash_digits": resp.study.hash_digits,
"uuid": resp.uuid,
"exp_data": resp.exp_data,
"global_event_timings": resp.global_event_timings,
}
frame_data_tuples = []
child_hashed_id = hash_id(
resp["child__uuid"],
resp["study__uuid"],
resp["study__salt"],
resp["study__hash_digits"],
)
# First add all of the global event timings as events with frame_id "global"
for (iEvent, event) in enumerate(resp["global_event_timings"]):
for (key, value) in event.items():
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id="global",
key=key,
event_number=str(iEvent),
value=value,
)
)
# Next add all data in exp_data
event_prefix = "eventTimings."
for frame_id, frame_data in resp["exp_data"].items():
for (key, value) in flatten_dict(frame_data).items():
# Process event data separately and include event_number within frame
if key.startswith(event_prefix):
key_pieces = key.split(".")
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=".".join(key_pieces[2:]),
event_number=str(key_pieces[1]),
value=value,
)
)
# omit frameType values from CSV
elif key == "frameType":
continue
# Omit the DOB from any exit survey
elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
continue
# Omit empty generatedProperties values from CSV
elif key == "generatedProperties" and not value:
continue
# For all other data, create a regular entry with frame_id and no event #
else:
frame_data_tuples.append(
FrameDataRow(
child_hashed_id=child_hashed_id,
response_uuid=str(resp["uuid"]),
frame_id=frame_id,
key=key,
event_number="",
value=value,
)
)
return frame_data_tuples
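# Illustration only (hypothetical response content): a global event
# {"eventType": "exitEarly"} at index 0 becomes
# FrameDataRow(..., frame_id="global", event_number="0", key="eventType",
# value="exitEarly"), while an exp_data entry under a frame id such as
# "1-video-config" with flattened key "eventTimings.0.eventType" becomes a row
# with frame_id="1-video-config", event_number="0", key="eventType".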
def build_framedata_dict_csv(writer, responses):
response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
unique_frame_ids = set()
event_keys = set()
unique_frame_keys_dict = {}
for page_num in response_paginator.page_range:
page_of_responses = response_paginator.page(page_num)
for resp in page_of_responses:
this_resp_data = get_frame_data(resp)
these_ids = {
d.frame_id.partition("-")[2]
for d in this_resp_data
if not d.frame_id == "global"
}
event_keys = event_keys | {
d.key for d in this_resp_data if d.event_number != ""
}
unique_frame_ids = unique_frame_ids | these_ids
for frame_id in these_ids:
these_keys = {
d.key
for d in this_resp_data
if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
}
if frame_id in unique_frame_keys_dict:
unique_frame_keys_dict[frame_id] = (
unique_frame_keys_dict[frame_id] | these_keys
)
else:
unique_frame_keys_dict[frame_id] = these_keys
# Start with general descriptions of high-level headers (child_id, response_id, etc.)
writer.writerows(
[
{"column": header, "description": description}
for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
]
)
writer.writerow(
{
"possible_frame_id": "global",
"frame_description": "Data not associated with a particular frame",
}
)
# Add placeholders to describe each frame type
unique_frame_ids = sorted(list(unique_frame_ids))
for frame_id in unique_frame_ids:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
}
)
unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
for k in unique_frame_keys:
writer.writerow(
{
"possible_frame_id": "*-" + frame_id,
"possible_key": k,
"key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
}
)
event_keys = sorted(list(event_keys))
event_key_stock_descriptions = {
"eventType": (
"Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
"cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
"or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
"find a list of events recorded by each frame in the frame documentation at "
"https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
),
"exitType": (
"Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
),
"lastPageSeen": (
"Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
),
"pipeId": (
"Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
"useful for troubleshooting in rare cases."
),
"streamTime": (
"Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
"video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
),
"timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
"videoId": (
"Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
"currently being recorded."
),
}
for k in event_keys:
writer.writerow(
{
"possible_frame_id": "any (event data)",
"possible_key": k,
"key_description": event_key_stock_descriptions.get(
k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
),
}
)
def build_single_response_framedata_csv(response):
"""
Builds CSV file contents for frame-level data from a single response. Used for both
building zip archive of all response data & offering individual-file downloads on individual responses view.
"""
this_resp_data = get_frame_data(response)
output, writer = csv_namedtuple_writer(FrameDataRow)
writer.writerows(this_resp_data)
return output.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return study.responses_for_researcher(self.request.user).order_by(
self.get_ordering()
)
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
model = Response
paginate_by = 10
ordering = "id"
def get_queryset(self):
study = self.study
return (
study.responses_for_researcher(self.request.user)
.order_by(self.get_ordering())
.select_related("child", "child__user", "study", "demographic_snapshot")
.values(
"uuid",
"date_created",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
"demographic_snapshot__uuid",
"demographic_snapshot__created_at",
"demographic_snapshot__number_of_children",
"demographic_snapshot__child_birthdays",
"demographic_snapshot__languages_spoken_at_home",
"demographic_snapshot__number_of_guardians",
"demographic_snapshot__number_of_guardians_explanation",
"demographic_snapshot__race_identification",
"demographic_snapshot__age",
"demographic_snapshot__gender",
"demographic_snapshot__education_level",
"demographic_snapshot__spouse_education_level",
"demographic_snapshot__annual_income",
"demographic_snapshot__number_of_books",
"demographic_snapshot__additional_comments",
"demographic_snapshot__country",
"demographic_snapshot__state",
"demographic_snapshot__density",
"demographic_snapshot__lookit_referrer",
"demographic_snapshot__extra",
)
)
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
"""
View to display a list of study responses.
"""
template_name = "studies/study_responses.html"
def get_ordering(self):
"""
Determine sort field and order. Sorting on id actually sorts on child id, not response id.
        Sorting on status actually sorts on the 'completed' field, since we are alphabetizing
        "in progress" and "completed".
"""
orderby = self.request.GET.get("sort", "id")
return orderby.replace("id", "child__id").replace("status", "completed")
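    # Illustrative mapping for get_ordering above (hypothetical query values, not from the original code):
    #   ?sort=-id    -> "-child__id"
    #   ?sort=status -> "completed"
    # str.replace applies anywhere in the string, so "-status" becomes "-completed".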
def get_queryset(self):
return (
super()
.get_queryset()
.prefetch_related(
"consent_rulings__arbiter",
Prefetch(
"feedback",
queryset=Feedback.objects.select_related("researcher").order_by(
"-id"
),
),
)
)
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["study"] = study = self.study
paginated_responses = context["object_list"]
columns_included_in_summary = study.columns_included_in_summary()
columns_included_in_table = [
"child__hashed_id",
"response__uuid",
"response__id",
"response__status",
"response__completed",
"response__is_preview",
]
response_data = []
for resp in paginated_responses:
# Info needed for table display of individual responses
this_resp_data = {
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_table
}
# Exception - store actual date object for date created
this_resp_data["response__date_created"] = resp.date_created
# info needed for summary table shown at right
this_resp_data["summary"] = [
{
"name": col.name,
"value": col.extractor(resp),
"description": col.description,
}
for col in RESPONSE_COLUMNS
if col.id in columns_included_in_summary
]
this_resp_data["videos"] = resp.videos.values("pk", "full_name")
for v in this_resp_data["videos"]:
v["display_name"] = (
v["full_name"]
.replace("videoStream_{}_".format(study.uuid), "...")
.replace("_{}_".format(resp.uuid), "...")
)
response_data.append(this_resp_data)
context["response_data"] = response_data
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
context["can_edit_feedback"] = self.request.user.has_study_perms(
StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
)
return context
def build_video_display_name(self, study_uuid, response_uuid, vid_name):
"""
Strips study_uuid and response_uuid out of video responses titles for better display.
"""
return ". . ." + ". . .".join(
vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
)
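    # Illustrative example for build_video_display_name (hypothetical filename; <study_uuid> and
    # <response_uuid> stand in for the actual UUID strings passed as arguments):
    #   "videoStream_<study_uuid>_3-my-frame_<response_uuid>_123_456.mp4"
    #   -> ". . .3-my-frame. . .123_456.mp4"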
class StudySingleResponseDownload(ResponseDownloadMixin, View):
"""
Download a single study response in the selected format with selected headers.
"""
def get(self, *args, **kwargs):
data_type = self.request.GET.get("data-type-selector", None)
if data_type not in ["json", "csv", "framedata"]:
raise SuspiciousOperation
response_id = self.request.GET.get("response_id", None)
try:
resp = self.get_queryset().get(pk=response_id)
except ObjectDoesNotExist:
raise SuspiciousOperation
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
extension = "json" if data_type == "json" else "csv"
filename = "{}_{}{}.{}".format(
study_name_for_files(study.name),
str(resp.uuid),
"_frames"
            if data_type == "framedata"
else "_identifiable"
if IDENTIFIABLE_DATA_HEADERS & header_options
else "",
extension,
)
if data_type == "json":
cleaned_data = json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t",
default=str,
)
elif data_type == "csv":
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
header_list = get_response_headers(header_options, row_data.keys())
output, writer = csv_dict_output_and_writer(header_list)
writer.writerow(row_data)
cleaned_data = output.getvalue()
elif data_type == "framedata":
cleaned_data = build_single_response_framedata_csv(resp)
else:
raise SuspiciousOperation
response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponseVideoAttachment(
ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
"""
View that redirects to a requested video for a study response.
"""
raise_exception = True
@cached_property
def video(self):
# Only select the video from consented videos for this study
return self.study.videos_for_consented_responses.get(
pk=self.kwargs.get("video")
)
def can_view_this_video(self):
user = self.request.user
study = self.study
video = self.video
return user.is_researcher and (
(
user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
and not video.response.is_preview
)
or (
user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
and video.response.is_preview
)
)
test_func = can_view_this_video
def get(self, request, *args, **kwargs):
video = self.video
download_url = video.download_url
if self.request.GET.get("mode") == "download":
r = requests.get(download_url)
response = FileResponse(
File.open(io.BytesIO(r.content)),
filename=video.filename,
as_attachment=True,
)
return response
return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
"""
View to create or edit response feedback.
"""
def user_can_edit_feedback(self):
user = self.request.user
study = self.study
# First check user has permission to be editing feedback from this study at all
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
return False
# Check that the feedback_id (if given) is from this study
feedback_id = self.request.POST.get("feedback_id", None)
if feedback_id:
try:
feedback = Feedback.objects.get(id=feedback_id)
except ObjectDoesNotExist:
return False
if feedback.response.study_id != study.pk:
return False
# Check that the response_id (if given) is from this study
response_id = self.request.POST.get("response_id", None)
if response_id:
try:
response = Response.objects.get(id=int(response_id))
except ObjectDoesNotExist:
return False
if response.study_id != study.pk:
return False
return True
test_func = user_can_edit_feedback
def post(self, request, *args, **kwargs):
"""
Create or edit feedback. Pass feedback_id to edit existing feedback, or response_id to create new
feedback for that response.
"""
form_data = self.request.POST
user = self.request.user
study = self.study
feedback_id = form_data.get("feedback_id", None)
comment = form_data.get("comment", "")
if feedback_id:
Feedback.objects.filter(id=feedback_id).update(comment=comment)
else:
response_id = int(form_data.get("response_id"))
Feedback.objects.create(
response_id=response_id, researcher=user, comment=comment
)
return HttpResponseRedirect(
reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
)
class StudyResponsesConsentManager(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
generic.DetailView,
):
"""Manage consent videos from here: approve or reject as evidence of informed consent."""
template_name = "studies/study_responses_consent_ruling.html"
queryset = Study.objects.all()
raise_exception = True
def user_can_code_consent(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and (
user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
)
test_func = user_can_code_consent
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Need to prefetch our responses with consent-footage videos.
study = context["study"]
# TODO: technically should not grant access to consent videos for preview data unless has that perm
# (or should clearly indicate that code_study_consent means preview + actual data)
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
responses = get_responses_with_current_rulings_and_videos(
study.id, preview_only
)
context["loaded_responses"] = responses
context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
# Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
# data-* properties in HTML
response_key_value_store = {}
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
# two jobs - generate statistics and populate k/v store.
for response in page_of_responses:
response_json = response_key_value_store[str(response["uuid"])] = {}
response["uuid"] = str(response.pop("uuid"))
response_json["videos"] = response.pop("videos")
response_json["details"] = {
"general": {
"uuid": response["uuid"],
"global_event_timings": json.dumps(
response.pop("global_event_timings")
),
"sequence": json.dumps(response.pop("sequence")),
"completed": json.dumps(response.pop("completed")),
"date_created": str(response["date_created"]),
},
"participant": {
"hashed_id": hash_participant_id(response),
"uuid": str(response.pop("child__user__uuid")),
"nickname": response.pop("child__user__nickname"),
},
"child": {
"hashed_id": hash_child_id(response),
"uuid": str(response.pop("child__uuid")),
"name": response.pop("child__given_name"),
"birthday": str(response.pop("child__birthday")),
"gender": response.pop("child__gender"),
"additional_information": response.pop(
"child__additional_information"
),
},
}
# TODO: Use json_script template tag to create JSON that can be used in Javascript
# (see https://docs.djangoproject.com/en/3.0/ref/templates/builtins/#json-script)
context["response_key_value_store"] = json.dumps(response_key_value_store)
return context
def post(self, request, *args, **kwargs):
"""This is where consent rulings are submitted."""
form_data = self.request.POST
user = self.request.user
study = self.get_object()
preview_only = not self.request.user.has_study_perms(
StudyPermission.CODE_STUDY_CONSENT, study
)
        # Unless the user has permission to code consent for real data, only allow actions on preview responses
responses = study.responses
if preview_only:
responses = responses.filter(is_preview=True)
comments = json.loads(form_data.get("comments"))
# We now accept pending rulings to reverse old reject/approve decisions.
for ruling in ("accepted", "rejected", "pending"):
judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
for response in judged_responses:
response.consent_rulings.create(
action=ruling,
arbiter=user,
comments=comments.pop(str(response.uuid), None),
)
response.save()
        # If any comments are left over, record them as new rulings repeating each response's most recent ruling.
if comments:
for resp_uuid, comment in comments.items():
response = responses.get(uuid=resp_uuid)
response.consent_rulings.create(
action=response.most_recent_ruling, arbiter=user, comments=comment
)
return HttpResponseRedirect(
reverse(
"exp:study-responses-consent-manager",
kwargs=dict(pk=self.get_object().pk),
)
)
def get(self, request, *args, **kwargs):
if self.get_object().study_type.is_external:
messages.error(request, "There is no consent manager for external studies.")
return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
else:
return super().get(request, *args, **kwargs)
class StudyResponsesAll(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
"""
StudyResponsesAll shows a variety of download options for response and child data
from a given study. (It does not actually show any data.)
"""
template_name = "studies/study_responses_all.html"
queryset = Study.objects.all()
http_method_names = ["get"]
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
context["can_delete_preview_data"] = self.request.user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, context["study"]
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDeletePreviewResponses(
ResearcherLoginRequiredMixin,
UserPassesTestMixin,
SingleObjectFetchProtocol[Study],
SingleObjectMixin,
View,
):
queryset = Study.objects.all()
def user_can_delete_preview_data(self):
user = self.request.user
study = self.get_object()
return user.is_researcher and user.has_study_perms(
StudyPermission.DELETE_ALL_PREVIEW_DATA, study
)
test_func = user_can_delete_preview_data
def post(self, request, *args, **kwargs):
"""
Post method on all responses view handles the 'delete all preview data' button.
"""
study = self.get_object()
# Note: delete all, not just consented!
preview_responses = study.responses.filter(is_preview=True).prefetch_related(
"videos", "responselog_set", "consent_rulings", "feedback"
)
paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
# response logs, consent rulings, feedback, videos will all be deleted
# via cascades - videos will be removed from S3 also on pre_delete hook
resp.delete()
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs={"pk": study.id})
)
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all study responses in JSON format.
"""
    # Smaller pagination because individual responses may be large and we don't want the JSON for 100
    # responses held in memory at once
paginate_by = 1
def make_chunk(self, paginator, page_num, header_options):
chunk = ""
if page_num == 1:
chunk = "[\n"
chunk += ",\n".join(
json.dumps(
construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
indent="\t", # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
default=str,
)
for resp in paginator.page(page_num)
)
if page_num == paginator.page_range[-1]:
chunk += "\n]"
else:
chunk += ",\n"
return chunk
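    # Sketch of the resulting stream (illustrative only, assuming paginate_by=1 and three responses):
    #   page 1 -> '[\n{...},\n'
    #   page 2 -> '{...},\n'
    #   page 3 -> '{...}\n]'
    # Concatenated, the chunks form a single valid JSON array without holding all
    # responses in memory at once.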
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
header_options = set(self.request.GET.getlist("data_options"))
filename = "{}_{}.json".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = StreamingHttpResponse(
(
self.make_chunk(paginator, page_num, header_options)
for page_num in paginator.page_range
),
content_type="text/json",
)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all study responses in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
headers = set()
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
)
# Add any new headers from this session
headers = headers | row_data.keys()
session_list.append(row_data)
header_options = set(self.request.GET.getlist("data_options"))
header_list = get_response_headers(header_options, headers)
output, writer = csv_dict_output_and_writer(header_list)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name),
"all-responses"
+ ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
"""
def build_summary_dict_csv(self, optional_headers_selected_ids):
"""
Builds CSV file contents for data dictionary corresponding to the overview CSV
"""
descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
header_list = get_response_headers(
optional_headers_selected_ids, descriptions.keys()
)
all_descriptions = [
{"column": header, "description": descriptions[header]}
for header in header_list
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
header_options = self.request.GET.getlist("data_options")
cleaned_data = self.build_summary_dict_csv(header_options)
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-responses-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads a summary of all children who participated in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
child_list = []
session_list = []
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = flatten_dict(
{
col.id: col.extractor(resp)
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
}
)
if row_data["child__global_id"] not in child_list:
child_list.append(row_data["child__global_id"])
session_list.append(row_data)
output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
writer.writerows(session_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-identifiable"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
"""
Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
Does not depend on actual response data.
TODO: separate from response data mixin
"""
def build_child_dict_csv(self):
"""
Builds CSV file contents for data dictionary for overview of all child participants
"""
all_descriptions = [
{"column": col.id, "description": col.description}
for col in RESPONSE_COLUMNS
if col.id in CHILD_CSV_HEADERS
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
return output.getvalue()
def get(self, request, *args, **kwargs):
study = self.study
cleaned_data = self.build_child_dict_csv()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-children-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
"""Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""
# TODO: with large files / many responses generation can take a while. Should generate asynchronously along
# with the data dict.
def render_to_response(self, context, **response_kwargs):
paginator = context["paginator"]
study = self.study
if study.study_type.is_external:
messages.error(
self.request, "Frame data is not available for External Studies."
)
return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
zipped_file = io.BytesIO() # import io
with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
data = build_single_response_framedata_csv(resp)
filename = "{}_{}_{}.csv".format(
study_name_for_files(study.name), resp.uuid, "frames"
)
zipped.writestr(filename, data)
zipped_file.seek(0)
response = FileResponse(
zipped_file,
as_attachment=True,
filename="{}_framedata_per_session.zip".format(
study_name_for_files(study.name)
),
)
return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
"""
Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
The file is put on GCP and a link is emailed to the user.
"""
def get(self, request, *args, **kwargs):
study = self.study
if study.study_type.is_external:
messages.error(
request, "Frame data dictionary is not available for external studies"
)
else:
filename = "{}_{}_{}".format(
study_name_for_files(study.name), study.uuid, "all-frames-dict"
)
build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
messages.success(
request,
f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-responses-all", kwargs=self.kwargs)
)
class StudyDemographics(
CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
"""
StudyDemographics view shows participant demographic snapshots associated
with each response to the study
"""
template_name = "studies/study_demographics.html"
queryset = Study.objects.all()
def | (self, **kwargs):
"""
Adds information for displaying how many and which types of responses are available.
"""
context = super().get_context_data(**kwargs)
context["n_responses"] = (
context["study"].responses_for_researcher(self.request.user).count()
)
context["can_view_regular_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
)
context["can_view_preview_responses"] = self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
)
return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in JSON format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
header_options = self.request.GET.getlist("demo_options")
json_responses = []
paginator = context["paginator"]
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
json_responses.append(
json.dumps(
construct_response_dictionary(
resp,
DEMOGRAPHIC_COLUMNS,
header_options,
include_exp_data=False,
),
indent="\t",
default=str,
)
)
cleaned_data = f"[ {', '.join(json_responses)} ]"
filename = "{}_{}.json".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/json")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
"""
Hitting this URL downloads all participant demographics in CSV format.
"""
def render_to_response(self, context, **response_kwargs):
study = self.study
paginator = context["paginator"]
header_options = set(self.request.GET.getlist("demo_options"))
participant_list = []
headers_for_download = get_demographic_headers(header_options)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
participant_list.append(row_data)
output, writer = csv_dict_output_and_writer(headers_for_download)
writer.writerows(participant_list)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(study.name), "all-demographic-snapshots"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
"""
    Hitting this URL downloads a data dictionary for participant demographics in CSV format.
Does not depend on any actual data.
"""
def render_to_response(self, context, **response_kwargs):
header_options = set(self.request.GET.getlist("demo_options"))
headers_for_download = get_demographic_headers(header_options)
all_descriptions = [
{"column": col.id, "description": col.description}
for col in DEMOGRAPHIC_COLUMNS
if col.id in headers_for_download
]
output, writer = csv_dict_output_and_writer(["column", "description"])
writer.writerows(all_descriptions)
cleaned_data = output.getvalue()
filename = "{}_{}.csv".format(
study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
)
response = HttpResponse(cleaned_data, content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
"""
Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
any collisions (empty string if none).
"""
def get(self, request, *args, **kwargs):
study = self.study
responses = (
study.consented_responses.order_by("id")
.select_related("child", "child__user", "study")
.values(
"uuid",
"child__uuid",
"child__user__uuid",
"study__uuid",
"study__salt",
"study__hash_digits",
)
)
child_dict = {}
account_dict = {}
collision_text = ""
# Note: could also just check number of unique global vs. hashed IDs in full dataset;
# only checking one-by-one for more informative output.
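        # A minimal sketch of that simpler check (hypothetical, not part of this view):
        #   hashed_ids = {hash_child_id(r) for r in responses}
        #   global_ids = {r["child__uuid"] for r in responses}
        #   if len(hashed_ids) != len(global_ids):
        #       ...  # some collision exists, but we would not know which IDs collided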
paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
for page_num in paginator.page_range:
page_of_responses = paginator.page(page_num)
for resp in page_of_responses:
participant_hashed_id = hash_participant_id(resp)
participant_global_id = resp["child__user__uuid"]
child_hashed_id = hash_child_id(resp)
child_global_id = resp["child__uuid"]
if participant_hashed_id in account_dict:
if participant_global_id != account_dict[participant_hashed_id]:
collision_text += "Participant hashed ID {} ({}, {})\n".format(
participant_hashed_id,
account_dict[participant_hashed_id],
participant_global_id,
)
else:
account_dict[participant_hashed_id] = participant_global_id
if child_hashed_id in child_dict:
if child_global_id != child_dict[child_hashed_id]:
collision_text += "Child hashed ID {} ({}, {})<br>".format(
child_hashed_id,
child_dict[child_hashed_id],
child_global_id,
)
else:
child_dict[child_hashed_id] = child_global_id
return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
"""
    StudyAttachments view shows video attachments for the study
"""
template_name = "studies/study_attachments.html"
model = Video
paginate_by = 100
def get_ordering(self):
return self.request.GET.get("sort", "-created_at") or "-created_at"
def get_queryset(self):
"""Fetches all consented videos this user has access to.
Returns:
QuerySet: all videos from this study where response has been marked as
consented and response is of a type (preview/actual data) that user can view
Todo:
* use a helper (e.g. in queries) select_videos_for_user to fetch the
appropriate videos here and in build_zipfile_of_videos - deferring for the moment
to work out dependencies.
"""
study = self.study
videos = study.videos_for_consented_responses
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_RESPONSE_DATA, study
):
videos = videos.filter(response__is_preview=True)
if not self.request.user.has_study_perms(
StudyPermission.READ_STUDY_PREVIEW_DATA, study
):
videos = videos.filter(response__is_preview=False)
match = self.request.GET.get("match", "")
if match:
videos = videos.filter(full_name__icontains=match)
return videos.order_by(self.get_ordering())
def get_context_data(self, **kwargs):
"""
In addition to the study, adds several items to the context dictionary. Study results
are paginated.
"""
context = super().get_context_data(**kwargs)
context["match"] = self.request.GET.get("match", "")
context["study"] = self.study
return context
def post(self, request, *args, **kwargs):
"""
        Handles requests to build zip archives of study videos (all videos, or consent videos only)
"""
match = self.request.GET.get("match", "")
study = self.study
if self.request.POST.get("all-attachments"):
build_zipfile_of_videos.delay(
f"{study.uuid}_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=False,
)
messages.success(
request,
f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
if self.request.POST.get("all-consent-videos"):
build_zipfile_of_videos.delay(
f"{study.uuid}_consent_videos",
study.uuid,
match,
self.request.user.uuid,
consent_only=True,
)
messages.success(
request,
f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
)
return HttpResponseRedirect(
reverse("exp:study-attachments", kwargs=self.kwargs)
)
| get_context_data |
0001_initial.py | from django.db import migrations
from api.user.models import CustomUser
class | (migrations.Migration):
def seed_data(apps, schema_editor):
user = CustomUser(
name = 'admin',
email = '[email protected]',
is_staff = True,
is_superuser = True,
phone = "9876554321",
gender = 'Male'
)
user.set_password('qwerty')
user.save()
dependencies = [ ]
operations = [
migrations.RunPython(seed_data),
] | Migration |
lib.rs | //! [`egui`] bindings for [`glium`](https://github.com/glium/glium).
//!
//! This library is an [`epi`] backend.
//!
//! If you are writing an app, you may want to look at [`eframe`](https://docs.rs/eframe) instead.
#![forbid(unsafe_code)]
#![cfg_attr(not(debug_assertions), deny(warnings))] // Forbid warnings in release builds
#![warn(clippy::all, rust_2018_idioms)]
#![allow(clippy::manual_range_contains, clippy::single_match)]
mod backend;
#[cfg(feature = "http")]
pub mod http;
mod painter;
#[cfg(feature = "persistence")]
pub mod persistence;
pub mod window_settings;
pub use backend::*;
pub use painter::Painter;
use {
clipboard::ClipboardProvider,
egui::*,
glium::glutin::{self, event::VirtualKeyCode, event_loop::ControlFlow},
};
pub use clipboard::ClipboardContext; // TODO: remove
pub struct GliumInputState {
pub raw: egui::RawInput,
}
impl GliumInputState {
pub fn from_pixels_per_point(pixels_per_point: f32) -> Self {
Self {
raw: egui::RawInput {
pixels_per_point: Some(pixels_per_point),
..Default::default()
},
}
}
}
pub fn input_to_egui(
event: glutin::event::WindowEvent<'_>,
clipboard: Option<&mut ClipboardContext>,
input_state: &mut GliumInputState,
control_flow: &mut ControlFlow,
) {
use glutin::event::WindowEvent::*;
match event {
CloseRequested | Destroyed => *control_flow = ControlFlow::Exit,
MouseInput { state, .. } => {
input_state.raw.mouse_down = state == glutin::event::ElementState::Pressed;
}
CursorMoved {
position: pos_in_pixels,
..
} => {
input_state.raw.mouse_pos = Some(pos2(
pos_in_pixels.x as f32 / input_state.raw.pixels_per_point.unwrap(),
pos_in_pixels.y as f32 / input_state.raw.pixels_per_point.unwrap(),
));
}
CursorLeft { .. } => {
input_state.raw.mouse_pos = None;
}
ReceivedCharacter(ch) => {
if printable_char(ch)
&& !input_state.raw.modifiers.ctrl
&& !input_state.raw.modifiers.mac_cmd
{
input_state.raw.events.push(Event::Text(ch.to_string()));
}
}
KeyboardInput { input, .. } => {
if let Some(keycode) = input.virtual_keycode {
let pressed = input.state == glutin::event::ElementState::Pressed;
if matches!(keycode, VirtualKeyCode::LAlt | VirtualKeyCode::RAlt) {
input_state.raw.modifiers.alt = pressed;
}
if matches!(keycode, VirtualKeyCode::LControl | VirtualKeyCode::RControl) {
input_state.raw.modifiers.ctrl = pressed;
if !cfg!(target_os = "macos") {
input_state.raw.modifiers.command = pressed;
}
}
if matches!(keycode, VirtualKeyCode::LShift | VirtualKeyCode::RShift) {
input_state.raw.modifiers.shift = pressed;
}
if cfg!(target_os = "macos")
&& matches!(keycode, VirtualKeyCode::LWin | VirtualKeyCode::RWin)
{
input_state.raw.modifiers.mac_cmd = pressed;
input_state.raw.modifiers.command = pressed;
}
if pressed {
if cfg!(target_os = "macos")
&& input_state.raw.modifiers.mac_cmd
&& keycode == VirtualKeyCode::Q
{
*control_flow = ControlFlow::Exit;
}
// VirtualKeyCode::Paste etc in winit are broken/untrustworthy,
// so we detect these things manually:
if input_state.raw.modifiers.command && keycode == VirtualKeyCode::X {
input_state.raw.events.push(Event::Cut);
} else if input_state.raw.modifiers.command && keycode == VirtualKeyCode::C {
input_state.raw.events.push(Event::Copy);
} else if input_state.raw.modifiers.command && keycode == VirtualKeyCode::V {
if let Some(clipboard) = clipboard {
match clipboard.get_contents() {
Ok(contents) => {
input_state.raw.events.push(Event::Text(contents));
}
Err(err) => {
eprintln!("Paste error: {}", err);
}
}
}
}
}
if let Some(key) = translate_virtual_key_code(keycode) {
input_state.raw.events.push(Event::Key {
key,
pressed,
modifiers: input_state.raw.modifiers,
});
}
}
}
MouseWheel { delta, .. } => {
match delta {
glutin::event::MouseScrollDelta::LineDelta(x, y) => {
let line_height = 24.0; // TODO
input_state.raw.scroll_delta = vec2(x, y) * line_height;
}
glutin::event::MouseScrollDelta::PixelDelta(delta) => {
// Actually point delta
input_state.raw.scroll_delta = vec2(delta.x as f32, delta.y as f32);
}
}
}
_ => {
// dbg!(event);
}
}
}
/// Glium sends special keys (backspace, delete, F1, ...) as characters.
/// Ignore those.
/// We also ignore '\r', '\n', '\t'.
/// Newlines are handled by the `Key::Enter` event.
fn printable_char(chr: char) -> bool {
let is_in_private_use_area = '\u{e000}' <= chr && chr <= '\u{f8ff}'
|| '\u{f0000}' <= chr && chr <= '\u{ffffd}'
|| '\u{100000}' <= chr && chr <= '\u{10fffd}';
!is_in_private_use_area && !chr.is_ascii_control()
}
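// Illustrative checks (not from the original crate):
//   assert!(printable_char('a'));
//   assert!(!printable_char('\u{e000}')); // private use area
//   assert!(!printable_char('\n')); // ASCII control; Enter is handled as a key event instead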
pub fn translate_virtual_key_code(key: VirtualKeyCode) -> Option<egui::Key> {
use VirtualKeyCode::*;
Some(match key {
Down => Key::ArrowDown,
Left => Key::ArrowLeft,
Right => Key::ArrowRight,
Up => Key::ArrowUp,
Escape => Key::Escape,
Tab => Key::Tab,
Back => Key::Backspace,
Return => Key::Enter,
Space => Key::Space,
Insert => Key::Insert,
Delete => Key::Delete,
Home => Key::Home,
End => Key::End,
PageUp => Key::PageUp,
PageDown => Key::PageDown,
Key0 | Numpad0 => Key::Num0,
Key1 | Numpad1 => Key::Num1,
Key2 | Numpad2 => Key::Num2,
Key3 | Numpad3 => Key::Num3,
Key4 | Numpad4 => Key::Num4,
Key5 | Numpad5 => Key::Num5,
Key6 | Numpad6 => Key::Num6,
Key7 | Numpad7 => Key::Num7,
Key8 | Numpad8 => Key::Num8,
Key9 | Numpad9 => Key::Num9,
A => Key::A,
B => Key::B,
C => Key::C,
D => Key::D,
E => Key::E,
F => Key::F,
G => Key::G,
H => Key::H,
I => Key::I,
J => Key::J,
K => Key::K,
L => Key::L,
M => Key::M,
N => Key::N,
O => Key::O,
P => Key::P,
Q => Key::Q,
R => Key::R,
S => Key::S,
T => Key::T,
U => Key::U,
V => Key::V,
W => Key::W,
X => Key::X,
Y => Key::Y,
Z => Key::Z,
_ => {
return None;
}
})
}
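// Illustrative behavior (not from the original crate): keys without a mapping yield no egui key event.
//   assert_eq!(translate_virtual_key_code(VirtualKeyCode::A), Some(egui::Key::A));
//   assert_eq!(translate_virtual_key_code(VirtualKeyCode::F1), None);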
pub fn translate_cursor(cursor_icon: egui::CursorIcon) -> glutin::window::CursorIcon {
match cursor_icon {
CursorIcon::Default => glutin::window::CursorIcon::Default,
CursorIcon::PointingHand => glutin::window::CursorIcon::Hand,
CursorIcon::ResizeHorizontal => glutin::window::CursorIcon::EwResize,
CursorIcon::ResizeNeSw => glutin::window::CursorIcon::NeswResize,
CursorIcon::ResizeNwSe => glutin::window::CursorIcon::NwseResize,
CursorIcon::ResizeVertical => glutin::window::CursorIcon::NsResize,
CursorIcon::Text => glutin::window::CursorIcon::Text,
CursorIcon::Grab => glutin::window::CursorIcon::Grab,
CursorIcon::Grabbing => glutin::window::CursorIcon::Grabbing,
}
}
pub fn handle_output(
output: egui::Output,
display: &glium::backend::glutin::Display,
clipboard: Option<&mut ClipboardContext>,
) {
if let Some(url) = output.open_url {
if let Err(err) = webbrowser::open(&url) {
eprintln!("Failed to open url: {}", err);
}
}
if !output.copied_text.is_empty() {
if let Some(clipboard) = clipboard {
if let Err(err) = clipboard.set_contents(output.copied_text) {
eprintln!("Copy/Cut error: {}", err);
}
}
}
display
.gl_window()
.window()
.set_cursor_icon(translate_cursor(output.cursor_icon));
}
pub fn init_clipboard() -> Option<ClipboardContext> {
match ClipboardContext::new() {
Ok(clipboard) => Some(clipboard),
Err(err) => {
eprintln!("Failed to initialize clipboard: {}", err);
None
}
}
}
// ----------------------------------------------------------------------------
/// Time of day as seconds since midnight. Used for clock in demo app.
pub fn seconds_since_midnight() -> Option<f64> {
#[cfg(feature = "time")]
{
use chrono::Timelike;
let time = chrono::Local::now().time();
let seconds_since_midnight =
time.num_seconds_from_midnight() as f64 + 1e-9 * (time.nanosecond() as f64);
Some(seconds_since_midnight)
}
#[cfg(not(feature = "time"))]
None
}
pub fn | (display: &glium::Display) -> Vec2 {
let (width_in_pixels, height_in_pixels) = display.get_framebuffer_dimensions();
vec2(width_in_pixels as f32, height_in_pixels as f32)
}
pub fn native_pixels_per_point(display: &glium::Display) -> f32 {
display.gl_window().window().scale_factor() as f32
}
| screen_size_in_pixels |
SharpRepeatOne.d.ts | export { default as SharpRepeatOne } from './Icon'; |
||
object.vesobj.go | //
// Copyright (c) 2018 Volterra, Inc. All rights reserved.
// Code generated by ves-gen-schema-go. DO NOT EDIT.
//
package namespace
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
google_protobuf "github.com/gogo/protobuf/types"
multierror "github.com/hashicorp/go-multierror"
"gopkg.volterra.us/stdlib/codec"
"gopkg.volterra.us/stdlib/db"
"gopkg.volterra.us/stdlib/errors"
"gopkg.volterra.us/stdlib/store"
ves_io_schema "github.com/volterraedge/terraform-provider-volterra/pbgo/extschema/schema"
"github.com/google/uuid"
"gopkg.volterra.us/stdlib/db/sro"
)
const (
ObjectDefTblName = "ves.io.schema.namespace.Object.default"
ObjectType = "ves.io.schema.namespace.Object"
StatusObjectDefTblName = "ves.io.schema.namespace.StatusObject.default"
StatusObjectType = "ves.io.schema.namespace.StatusObject"
)
// augmented methods on protoc/std generated struct
func (e *Object) Type() string {
return "ves.io.schema.namespace.Object"
}
func (e *Object) ToEntry() db.Entry {
return NewDBObject(e, db.OpWithNoCopy())
}
func LocateObject(ctx context.Context, locator db.EntryLocator, uid, tenant, namespace, name string, opts ...db.FindEntryOpt) (*DBObject, error) {
timestamp, err := google_protobuf.TimestampProto(time.Now())
if err != nil {
return nil, errors.Wrapf(err, "%s: LocateObject", uid)
}
if uid != "" {
obj, exist, err := FindObject(ctx, locator, uid, opts...)
if err != nil {
return nil, errors.Wrapf(err, "%s: LocateObject", uid)
}
if exist && obj != nil {
obj.SystemMetadata.ModificationTimestamp = timestamp
return obj, nil
}
} else {
uid = uuid.New().String()
}
sysMD := &ves_io_schema.SystemObjectMetaType{
Uid: uid,
Tenant: tenant,
CreatorClass: locator.GetCreatorClass(),
CreatorId: locator.GetCreatorID(),
CreationTimestamp: timestamp,
ModificationTimestamp: timestamp,
}
obj := NewDBObject(nil)
obj.SetObjUid(uid)
obj.SetObjName(name)
obj.SetObjNamespace(namespace)
obj.SetObjSystemMetadata(sysMD)
obj.Spec = &SpecType{}
return obj, nil
}
func FindObject(ctx context.Context, finder db.EntryFinder, key string, opts ...db.FindEntryOpt) (*DBObject, bool, error) {
e, exist, err := finder.FindEntry(ctx, ObjectDefTblName, key, opts...)
if !exist || err != nil {
return nil, exist, err
}
obj, ok := e.(*DBObject)
if !ok {
return nil, false, fmt.Errorf("Cannot convert entry to object")
}
return obj, exist, err
}
func ListObject(ctx context.Context, lister db.EntryLister, opts ...db.ListEntriesOpt) ([]*DBObject, error) {
var (
oList []*DBObject
merr *multierror.Error
)
eList, err := lister.ListEntries(ctx, ObjectDefTblName, opts...)
if err != nil {
merr = multierror.Append(merr, err)
}
for _, e := range eList {
obj, ok := e.(*DBObject)
if ok {
oList = append(oList, obj)
} else {
merr = multierror.Append(merr, fmt.Errorf("Cannot convert entry to %s object", ObjectType))
}
}
return oList, errors.ErrOrNil(merr)
}
// Redact squashes sensitive info in o (in-place)
func (o *Object) Redact(ctx context.Context) error {
// clear fields with confidential option set (at message or field level)
if o == nil {
return nil
}
if err := o.GetSpec().Redact(ctx); err != nil {
return errors.Wrapf(err, "Redacting Object.spec")
}
return nil
}
func (o *Object) DeepCopy() *Object {
if o == nil {
return nil
}
ser, err := o.Marshal()
if err != nil {
return nil
}
c := &Object{}
err = c.Unmarshal(ser)
if err != nil {
return nil
}
return c
}
func (e *Object) ToJSON() (string, error) {
return codec.ToJSON(e)
}
func (e *Object) ToYAML() (string, error) {
return codec.ToYAML(e)
}
func (e *Object) GetTraceInfo() string {
sysMD := e.GetSystemMetadata()
if sysMD == nil {
return ""
}
return sysMD.GetTraceInfo()
}
// A struct wrapping protoc/std generated struct with additional capabilities
// forming a db.Entry
type DBObject struct {
// Anonymous embed of standard protobuf generated struct
*Object
tbl db.Table
}
// GetObjectIndexers returns the associated store.Indexers for Object
func GetObjectIndexers() store.Indexers {
return nil
}
func (e *DBObject) GetDB() (*db.DB, error) {
if e.tbl == nil {
return nil, fmt.Errorf("Entry has no table")
}
return e.tbl.GetDB(), nil
}
// Implement ves.io/stdlib/db.Entry interface
func (e *DBObject) Key(opts ...db.KeyOpt) (string, error) {
ko := db.NewKeyOpts(opts...)
if ko.Public {
md := e.GetMetadata()
if md == nil {
return "", fmt.Errorf("Metadata is nil")
}
return fmt.Sprintf("%s/%s", md.GetNamespace(), md.GetName()), nil
} else {
if e.GetSystemMetadata() == nil {
return "", fmt.Errorf("SystemMetadata is nil")
}
return e.GetSystemMetadata().GetUid(), nil
}
}
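// Illustrative key forms (hypothetical values, not from the generated code):
//   public key  -> "<namespace>/<name>", e.g. "my-namespace/my-object"
//   private key -> the system metadata UID, e.g. "0f4f1a52-1c5e-4b7d-9a53-2f3c8e6d1b2a"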
func (e *DBObject) Type() string {
return "ves.io.schema.namespace.Object"
}
func (e *DBObject) DeepCopy() db.Entry {
if e == nil {
return nil
}
n := NewDBObject(e.Object)
n.tbl = e.tbl
return n
}
func (e *DBObject) MarshalBytes() ([]byte, error) {
return e.Marshal()
}
func (e *DBObject) UnmarshalBytes(b []byte) error {
return e.Unmarshal(b)
}
func (e *DBObject) Sample(r *rand.Rand) (db.Entry, error) {
uid := uuid.New().String()
o := &Object{
Metadata: &ves_io_schema.ObjectMetaType{
Name: uuid.New().String(),
Namespace: uuid.New().String(),
Uid: uid,
},
SystemMetadata: &ves_io_schema.SystemObjectMetaType{
Uid: uid,
Tenant: uuid.New().String(),
},
Spec: &SpecType{},
}
return &DBObject{o, e.tbl}, nil
}
func (e *DBObject) Validate(ctx context.Context, opts ...db.ValidateOpt) error {
return ObjectValidator().Validate(ctx, e.Object, opts...)
}
func (e *DBObject) SetBlob(ctx context.Context, bID string, bVal interface{}, opts ...db.BlobOpt) error {
db, err := e.GetDB()
if err != nil {
return errors.Wrap(err, "SetBlob")
}
key, err := e.Key()
if err != nil {
return errors.Wrap(err, "SetBlob accessing key")
}
err = db.SetEntryBlob(ctx, key, e.Type(), bID, bVal, opts...)
if err != nil {
return errors.Wrap(err, "SetBlob setting in db")
}
return nil
}
func (e *DBObject) ClrBlob(ctx context.Context, bID string, opts ...db.BlobOpt) error {
db, err := e.GetDB()
if err != nil {
return errors.Wrap(err, "ClrBlob")
}
key, err := e.Key()
if err != nil {
return errors.Wrap(err, "ClrBlob accessing key")
}
err = db.ClrEntryBlob(ctx, key, e.Type(), bID, opts...)
if err != nil {
return errors.Wrap(err, "ClrBlob clearing in db")
}
return nil
}
func (e *DBObject) GetBlob(ctx context.Context, bID string, opts ...db.BlobOpt) (interface{}, error) {
db, err := e.GetDB()
if err != nil {
return nil, errors.Wrap(err, "GetBlob")
}
key, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetBlob accessing key")
}
return db.GetEntryBlob(ctx, key, e.Type(), bID, opts...)
}
func (e *DBObject) GetBlobs(ctx context.Context, opts ...db.BlobOpt) (map[string]interface{}, error) {
db, err := e.GetDB()
if err != nil {
return nil, errors.Wrap(err, "GetBlobs")
}
key, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetBlobs accessing key")
}
return db.GetEntryBlobs(ctx, key, e.Type(), opts...)
}
func (e *DBObject) IsDeleted() (bool, error) {
db, err := e.GetDB()
if err != nil {
return false, errors.Wrap(err, "IsDeleted")
}
key, err := e.Key()
if err != nil {
return false, errors.Wrap(err, "IsDeleted accessing key")
}
isDel, err := db.IsEntryDeleted(key, e.Type())
if err != nil {
return false, errors.Wrap(err, "IsDeleted accessing db")
}
return isDel, nil
}
// Implement ves.io/stdlib/db.EntryPvt interface
func (e *DBObject) SetTable(tbl db.Table) {
e.tbl = tbl
}
func (e *DBObject) GetDRefInfo() ([]db.DRefInfo, error) {
var (
err error
drInfos, fdrInfos []db.DRefInfo
)
refrUID, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetDRefInfo, error in key")
}
fdrInfos, err = e.GetSystemMetadataDRefInfo()
if err != nil {
return nil, errors.Wrap(err, "Getting Field direct ref info")
}
for _, dri := range fdrInfos {
// Convert Spec.LcSpec.vnRefs to ves.io.examplesvc.objectone.Object.Spec.LcSpec.vnRefs
dri.DRField = "ves.io.schema.namespace.Object." + dri.DRField
dri.RefrType = e.Type()
dri.RefrUID = refrUID
// convert any ref_to schema annotation specified by kind value to type value
if !strings.HasPrefix(dri.RefdType, "ves.io") {
d, err := e.GetDB()
if err != nil {
return nil, errors.Wrap(err, "Cannot find db for entry to resolve kind to type")
}
refdType, err := d.TypeForEntryKind(dri.RefrType, dri.RefrUID, dri.RefdType)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Cannot convert kind %s to type", dri.RefdType))
}
dri.RefdType = refdType
}
drInfos = append(drInfos, dri)
}
return drInfos, err
}
func (e *DBObject) ToStore() store.Entry {
return e.Object
}
func (e *DBObject) ToJSON() (string, error) {
return e.ToStore().ToJSON()
}
func (e *DBObject) ToYAML() (string, error) {
return e.ToStore().ToYAML()
}
func (e *DBObject) GetTable() db.Table {
return e.tbl
}
func NewDBObject(o *Object, opts ...db.OpOption) *DBObject {
op := db.NewOpFrom(opts...)
if o == nil {
return &DBObject{Object: &Object{}}
}
obj := o
if !op.NoCopy() {
obj = o.DeepCopy()
}
return &DBObject{Object: obj}
}
func NewEntryObject(opts ...db.OpOption) db.Entry {
op := db.NewOpFrom(opts...)
s := op.StoreEntry()
switch v := s.(type) {
case nil:
return NewDBObject(nil, opts...)
case *Object:
return NewDBObject(v, opts...)
}
return nil
}
// GetDRefInfo for the field's type
func (e *DBObject) GetSystemMetadataDRefInfo() ([]db.DRefInfo, error) {
var (
drInfos, driSet []db.DRefInfo
err error
)
_ = driSet
if e.SystemMetadata == nil {
return []db.DRefInfo{}, nil
}
driSet, err = e.SystemMetadata.GetDRefInfo()
if err != nil {
return nil, err
}
for _, dri := range driSet {
dri.DRField = "system_metadata." + dri.DRField
drInfos = append(drInfos, dri)
}
return drInfos, err
}
// Implement sro.SRO interface
func (o *DBObject) GetObjMetadata() sro.ObjectMetadata {
return o.GetMetadata()
}
func (o *DBObject) SetObjMetadata(in sro.ObjectMetadata) error {
if in == nil {
o.Metadata = nil
return nil
}
m, ok := in.(*ves_io_schema.ObjectMetaType)
if !ok {
return fmt.Errorf("Error: SetObjMetadata expected *ObjectMetaType, got %T", in)
}
o.Metadata = m
return nil
}
func (o *DBObject) GetObjSystemMetadata() sro.SystemMetadata {
if o.GetSystemMetadata() == nil {
return nil
}
return o.GetSystemMetadata()
}
func (o *DBObject) SetObjSystemMetadata(in sro.SystemMetadata) error {
if in == nil {
o.SystemMetadata = nil
return nil
}
m, ok := in.(*ves_io_schema.SystemObjectMetaType)
if !ok {
return fmt.Errorf("Error: SetObjSystemMetadata expected *SystemObjectMetaType, got %T", in)
}
o.SystemMetadata = m
return nil
}
func (o *DBObject) GetObjSpec() sro.Spec {
if o.GetSpec() == nil {
return nil
}
return o.GetSpec()
}
func (o *DBObject) SetObjSpec(in sro.Spec) error {
if in == nil {
o.Spec = nil
return nil
}
m, ok := in.(*SpecType)
if !ok {
return fmt.Errorf("Error: SetObjSpec expected *SpecType, got %T", in)
}
o.Spec = m
return nil
}
func FindObjectStatus(ctx context.Context, d db.Interface, objUid string) ([]*StatusObject, error) {
statusDBEntries, err := d.GetEntryBackrefs(ctx, objUid, ObjectType, db.WithBackrefTypes([]string{"ves.io.schema.namespace.StatusObject"}))
if err != nil {
return nil, err
}
var merr *multierror.Error
var statusObjs []*StatusObject
for _, statusDBEntry := range statusDBEntries {
statusEntry := statusDBEntry.ToStore()
statusObj, ok := statusEntry.(*StatusObject)
if !ok {
merr = multierror.Append(merr, fmt.Errorf("Status Backref expected *StatusObject, got %T: %v", statusEntry, statusEntry))
continue
}
statusObjs = append(statusObjs, statusObj)
}
return statusObjs, errors.ErrOrNil(merr)
}
// SetObjectRef sets reference to a configuration object
func (o *StatusObject) SetObjectRef(objKind, objUid string) error {
if len(o.GetObjectRefs()) != 0 {
return fmt.Errorf("StatusObject already has a reference to %v", o.GetObjectRefs())
}
o.ObjectRefs = append(o.ObjectRefs, &ves_io_schema.ObjectRefType{Kind: objKind, Uid: objUid})
return nil
}
func (o *StatusObject) GetStatusObjMetadata() sro.StatusObjectMetadata {
return o.GetMetadata()
}
func (o *StatusObject) SetStatusObjMetadata(md sro.StatusObjectMetadata) {
if o == nil {
return
}
if o.Metadata == nil {
o.Metadata = &ves_io_schema.StatusMetaType{}
}
o.Metadata = md.(*ves_io_schema.StatusMetaType)
}
// GetVtrpId returns vtrpId of the status object.
func (o *StatusObject) GetVtrpId() string {
return o.GetMetadata().GetVtrpId()
}
// SetVtrpId sets vtrpId of the status object.
func (o *StatusObject) SetVtrpId(id string) {
o.GetMetadata().SetVtrpId(id)
}
func (o *StatusObject) GetStatusObjConditions() []sro.StatusObjectCondition {
if o == nil {
return nil
}
return ves_io_schema.ToStatusObjectConditions(o.GetConditions())
}
func (o *StatusObject) SetStatusObjConditions(socSet []sro.StatusObjectCondition) {
if o == nil {
return
}
o.Conditions = ves_io_schema.FromStatusObjectConditions(socSet)
}
func (o *DBObject) GetObjType() string {
return o.Type()
}
// GetObjUid returns uuid from source-of-truth, in systemMetadata
func (o *DBObject) GetObjUid() string {
return o.GetSystemMetadata().GetUid()
}
// GetObjTenant returns tenant from source-of-truth, in systemMetadata
func (o *DBObject) GetObjTenant() string {
return o.GetSystemMetadata().GetTenant()
}
// GetObjCreatorClass returns creator-class from systemMetadata
func (o *DBObject) GetObjCreatorClass() string {
return o.GetSystemMetadata().GetCreatorClass()
}
// GetObjectIndex returns object-index from systemMetadata
func (o *DBObject) GetObjectIndex() uint32 {
return o.GetSystemMetadata().GetObjectIndex()
}
// SetObjUid sets uuid as a hint, in Metadata
func (o *DBObject) SetObjUid(u string) error {
// TODO: make sure 'u' is of uuid form
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Uid = u
o.Metadata = m
return nil
}
func (o *DBObject) GetObjName() string {
return o.GetMetadata().GetName()
}
func (o *DBObject) SetObjName(n string) error {
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Name = n
o.Metadata = m
return nil
}
func (o *DBObject) GetObjNamespace() string {
return o.GetMetadata().GetNamespace()
}
func (o *DBObject) SetObjNamespace(ns string) error {
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Namespace = ns
o.Metadata = m
return nil
}
func (o *DBObject) GetObjLabels() map[string]string {
return o.GetMetadata().GetLabels()
}
func (o *DBObject) SetObjLabels(l map[string]string) error {
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Labels = l
o.Metadata = m
return nil
}
func (o *DBObject) GetObjAnnotations() map[string]string {
return o.GetMetadata().GetAnnotations()
}
func (o *DBObject) SetObjAnnotations(a map[string]string) error {
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Annotations = a
o.Metadata = m
return nil
}
func (o *DBObject) GetObjDescription() string {
return o.GetMetadata().GetDescription()
}
func (o *DBObject) SetObjDescription(d string) error {
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Description = d
o.Metadata = m
return nil
}
func (o *DBObject) GetObjDisable() bool {
return o.GetMetadata().GetDisable()
}
func (o *DBObject) SetObjDisable(d bool) error {
m := o.GetMetadata()
if m == nil {
m = &ves_io_schema.ObjectMetaType{}
}
m.Disable = d
o.Metadata = m
return nil
}
func (o *DBObject) GetObjSREDisable() bool {
return o.GetSystemMetadata().GetSreDisable()
}
func (o *DBObject) SetObjSREDisable(d bool) error {
m := o.GetSystemMetadata()
if m == nil {
m = &ves_io_schema.SystemObjectMetaType{}
}
m.SreDisable = d
return nil
}
func (o *DBObject) SetObjCreator(cls, inst string) error {
m := o.GetSystemMetadata()
if m == nil {
m = &ves_io_schema.SystemObjectMetaType{}
}
m.CreatorClass = cls
m.CreatorId = inst
o.SystemMetadata = m
return nil
}
func (o *DBObject) SetObjectIndex(idx uint32) error {
m := o.GetSystemMetadata()
if m == nil {
m = &ves_io_schema.SystemObjectMetaType{}
}
m.ObjectIndex = idx
o.SystemMetadata = m
return nil
}
func (o *DBObject) GetObjFinalizers() []string {
return o.GetSystemMetadata().GetFinalizers()
}
func (o *DBObject) SetObjFinalizers(values ...string) error {
m := o.GetSystemMetadata()
if m == nil {
return fmt.Errorf("Object has nil system_metadata")
}
m.Finalizers = values
return nil
}
func (o *DBObject) GetObjPendingInitializers() []string {
initializers := o.GetSystemMetadata().GetInitializers()
var pending []string
for _, p := range initializers.GetPending() {
pending = append(pending, p.GetName())
}
return pending
}
func (o *DBObject) SetObjPendingInitializers(pending ...string) {
m := o.GetSystemMetadata()
if m == nil {
m = &ves_io_schema.SystemObjectMetaType{}
o.SystemMetadata = m
}
initializers := m.GetInitializers()
if initializers == nil {
initializers = &ves_io_schema.InitializersType{}
m.Initializers = initializers
}
var pendingInitializers []*ves_io_schema.InitializerType
for _, p := range pending {
pendingInitializers = append(pendingInitializers, &ves_io_schema.InitializerType{Name: p})
}
initializers.Pending = pendingInitializers
}
func (o *DBObject) IsSpecEqual(other sro.SRO) bool {
otherObjSpec := other.GetObjSpec()
otherSpec, ok := otherObjSpec.(*SpecType)
if !ok {
return false
}
return o.GetSpec().Equal(otherSpec)
}
// GetVtrpId returns vtrpId of the object.
func (o *DBObject) GetVtrpId() string {
return o.GetSystemMetadata().GetVtrpId()
}
// SetVtrpId sets vtrpId of the object.
func (o *DBObject) SetVtrpId(id string) {
o.GetSystemMetadata().SetVtrpId(id)
}
type ValidateObject struct {
FldValidators map[string]db.ValidatorFunc
}
func (v *ValidateObject) Validate(ctx context.Context, pm interface{}, opts ...db.ValidateOpt) error {
e, ok := pm.(*Object)
if !ok {
switch t := pm.(type) {
default:
return fmt.Errorf("Expected type *Object got type %s", t)
}
}
if e == nil {
return nil
}
if fv, exists := v.FldValidators["metadata"]; exists {
vOpts := append(opts, db.WithValidateField("metadata"))
if err := fv(ctx, e.GetMetadata(), vOpts...); err != nil {
return err
}
}
if fv, exists := v.FldValidators["spec"]; exists {
vOpts := append(opts, db.WithValidateField("spec"))
if err := fv(ctx, e.GetSpec(), vOpts...); err != nil {
return err
}
}
if fv, exists := v.FldValidators["system_metadata"]; exists {
vOpts := append(opts, db.WithValidateField("system_metadata"))
if err := fv(ctx, e.GetSystemMetadata(), vOpts...); err != nil {
return err
}
}
return nil
}
// Well-known symbol for default validator implementation
var DefaultObjectValidator = func() *ValidateObject {
v := &ValidateObject{FldValidators: map[string]db.ValidatorFunc{}}
v.FldValidators["metadata"] = ves_io_schema.ObjectMetaTypeValidator().Validate
v.FldValidators["system_metadata"] = ves_io_schema.SystemObjectMetaTypeValidator().Validate
v.FldValidators["spec"] = SpecTypeValidator().Validate
return v
}()
func ObjectValidator() db.Validator {
return DefaultObjectValidator
}
// augmented methods on protoc/std generated struct
func (e *StatusObject) Type() string {
return "ves.io.schema.namespace.StatusObject"
}
func (e *StatusObject) ToEntry() db.Entry {
return NewDBStatusObject(e, db.OpWithNoCopy())
}
func FindStatusObject(ctx context.Context, finder db.EntryFinder, key string, opts ...db.FindEntryOpt) (*DBStatusObject, bool, error) {
e, exist, err := finder.FindEntry(ctx, StatusObjectDefTblName, key, opts...)
if !exist || err != nil {
return nil, exist, err
}
obj, ok := e.(*DBStatusObject)
if !ok {
return nil, false, fmt.Errorf("Cannot convert entry to object")
}
return obj, exist, err
}
func ListStatusObject(ctx context.Context, lister db.EntryLister, opts ...db.ListEntriesOpt) ([]*DBStatusObject, error) {
var (
oList []*DBStatusObject
merr *multierror.Error
)
eList, err := lister.ListEntries(ctx, StatusObjectDefTblName, opts...)
if err != nil {
merr = multierror.Append(merr, err)
}
for _, e := range eList {
obj, ok := e.(*DBStatusObject)
if ok {
oList = append(oList, obj)
} else {
merr = multierror.Append(merr, fmt.Errorf("Cannot convert entry to %s object", StatusObjectType))
}
}
return oList, errors.ErrOrNil(merr)
}
func (o *StatusObject) DeepCopy() *StatusObject {
if o == nil {
return nil
}
ser, err := o.Marshal()
if err != nil {
return nil
}
c := &StatusObject{}
err = c.Unmarshal(ser)
if err != nil {
return nil
}
return c
}
func (e *StatusObject) ToJSON() (string, error) {
return codec.ToJSON(e)
}
func (e *StatusObject) ToYAML() (string, error) {
return codec.ToYAML(e)
}
// A struct wrapping protoc/std generated struct with additional capabilities
// forming a db.Entry
type DBStatusObject struct {
// Anonymous embed of standard protobuf generated struct
*StatusObject
tbl db.Table
}
// GetStatusObjectIndexers returns the associated store.Indexers for StatusObject
func GetStatusObjectIndexers() store.Indexers {
return nil
}
func (e *DBStatusObject) GetDB() (*db.DB, error) {
if e.tbl == nil {
return nil, fmt.Errorf("Entry has no table")
}
return e.tbl.GetDB(), nil
}
// Implement ves.io/stdlib/db.Entry interface
func (e *DBStatusObject) Key(opts ...db.KeyOpt) (string, error) {
return e.GetMetadata().GetUid(), nil
}
func (e *DBStatusObject) Type() string {
return "ves.io.schema.namespace.StatusObject"
}
func (e *DBStatusObject) DeepCopy() db.Entry {
if e == nil {
return nil
}
n := NewDBStatusObject(e.StatusObject)
n.tbl = e.tbl
return n
}
func (e *DBStatusObject) MarshalBytes() ([]byte, error) {
return e.Marshal()
}
func (e *DBStatusObject) UnmarshalBytes(b []byte) error {
return e.Unmarshal(b)
}
func (e *DBStatusObject) Sample(r *rand.Rand) (db.Entry, error) {
o := &StatusObject{}
return &DBStatusObject{o, e.tbl}, nil
}
func (e *DBStatusObject) Validate(ctx context.Context, opts ...db.ValidateOpt) error {
return StatusObjectValidator().Validate(ctx, e.StatusObject, opts...)
}
func (e *DBStatusObject) SetBlob(ctx context.Context, bID string, bVal interface{}, opts ...db.BlobOpt) error {
db, err := e.GetDB()
if err != nil {
return errors.Wrap(err, "SetBlob")
}
key, err := e.Key()
if err != nil {
return errors.Wrap(err, "SetBlob accessing key")
}
err = db.SetEntryBlob(ctx, key, e.Type(), bID, bVal, opts...)
if err != nil {
return errors.Wrap(err, "SetBlob setting in db")
}
return nil
}
func (e *DBStatusObject) ClrBlob(ctx context.Context, bID string, opts ...db.BlobOpt) error {
db, err := e.GetDB()
if err != nil {
return errors.Wrap(err, "ClrBlob")
}
key, err := e.Key()
if err != nil {
return errors.Wrap(err, "ClrBlob accessing key")
}
err = db.ClrEntryBlob(ctx, key, e.Type(), bID, opts...)
if err != nil {
return errors.Wrap(err, "ClrBlob clearing in db")
}
return nil
}
func (e *DBStatusObject) GetBlob(ctx context.Context, bID string, opts ...db.BlobOpt) (interface{}, error) {
db, err := e.GetDB()
if err != nil {
return nil, errors.Wrap(err, "GetBlob")
}
key, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetBlob accessing key")
}
return db.GetEntryBlob(ctx, key, e.Type(), bID, opts...)
}
func (e *DBStatusObject) GetBlobs(ctx context.Context, opts ...db.BlobOpt) (map[string]interface{}, error) {
db, err := e.GetDB()
if err != nil {
return nil, errors.Wrap(err, "GetBlobs")
}
key, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetBlobs accessing key")
}
return db.GetEntryBlobs(ctx, key, e.Type(), opts...)
}
func (e *DBStatusObject) IsDeleted() (bool, error) {
db, err := e.GetDB()
if err != nil {
return false, errors.Wrap(err, "IsDeleted")
}
key, err := e.Key()
if err != nil {
return false, errors.Wrap(err, "IsDeleted accessing key")
}
isDel, err := db.IsEntryDeleted(key, e.Type())
if err != nil {
return false, errors.Wrap(err, "IsDeleted accessing db")
}
return isDel, nil
}
// Implement ves.io/stdlib/db.EntryPvt interface
func (e *DBStatusObject) SetTable(tbl db.Table) {
e.tbl = tbl
}
func (e *DBStatusObject) GetDRefInfo() ([]db.DRefInfo, error) {
var (
err error
drInfos, fdrInfos []db.DRefInfo
)
refrUID, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetDRefInfo, error in key")
}
fdrInfos, err = e.GetObjectRefsDRefInfo()
if err != nil {
return nil, errors.Wrap(err, "Getting Field direct ref info")
}
for _, dri := range fdrInfos {
// Convert Spec.LcSpec.vnRefs to ves.io.examplesvc.objectone.Object.Spec.LcSpec.vnRefs
dri.DRField = "ves.io.schema.namespace.StatusObject." + dri.DRField
dri.RefrType = e.Type()
dri.RefrUID = refrUID
// convert any ref_to schema annotation specified by kind value to type value
if !strings.HasPrefix(dri.RefdType, "ves.io") {
d, err := e.GetDB()
if err != nil {
return nil, errors.Wrap(err, "Cannot find db for entry to resolve kind to type")
}
refdType, err := d.TypeForEntryKind(dri.RefrType, dri.RefrUID, dri.RefdType)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Cannot convert kind %s to type", dri.RefdType))
}
dri.RefdType = refdType
}
drInfos = append(drInfos, dri)
}
return drInfos, err
}
func (e *DBStatusObject) ToStore() store.Entry {
return e.StatusObject
}
func (e *DBStatusObject) ToJSON() (string, error) {
return e.ToStore().ToJSON()
}
func (e *DBStatusObject) ToYAML() (string, error) {
return e.ToStore().ToYAML()
}
func (e *DBStatusObject) GetTable() db.Table {
return e.tbl
}
func NewDBStatusObject(o *StatusObject, opts ...db.OpOption) *DBStatusObject {
op := db.NewOpFrom(opts...)
if o == nil {
return &DBStatusObject{StatusObject: &StatusObject{}}
}
obj := o
if !op.NoCopy() |
return &DBStatusObject{StatusObject: obj}
}
func NewEntryStatusObject(opts ...db.OpOption) db.Entry {
op := db.NewOpFrom(opts...)
s := op.StoreEntry()
switch v := s.(type) {
case nil:
return NewDBStatusObject(nil, opts...)
case *StatusObject:
return NewDBStatusObject(v, opts...)
}
return nil
}
func (e *DBStatusObject) GetObjectRefsDRefInfo() ([]db.DRefInfo, error) {
drInfos := []db.DRefInfo{}
refrUID, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "GetDRefInfo, error in key")
}
for i, ref := range e.GetObjectRefs() {
if ref == nil {
return nil, fmt.Errorf("StatusObject.object_refs[%d] has a nil value", i)
}
// resolve kind to type if needed at DBObject.GetDRefInfo()
drInfos = append(drInfos, db.DRefInfo{
RefdType: "namespace.Object",
RefdUID: ref.Uid,
RefdTenant: ref.Tenant,
RefdNS: ref.Namespace,
RefdName: ref.Name,
RefrType: e.Type(),
RefrUID: refrUID,
DRField: "object_refs",
Ref: ref,
})
}
return drInfos, nil
}
// GetObjectRefsDBEntries returns the db.Entry corresponding to the ObjRefType from the default Table
func (e *DBStatusObject) GetObjectRefsDBEntries(ctx context.Context, d db.Interface) ([]db.Entry, error) {
var entries []db.Entry
refrUID, err := e.Key()
if err != nil {
return nil, errors.Wrap(err, "Get<fld>DBEntries, error in key")
}
refdType, err := d.TypeForEntryKind(e.Type(), refrUID, "namespace.Object")
if err != nil {
return nil, errors.Wrap(err, "Cannot find type for kind: namespace")
}
tblName := db.DefaultTableName(refdType)
if intTbl, err := d.GetTable(ctx, db.InternalTableName(refdType)); err == nil {
tblName = intTbl.Name()
}
for _, ref := range e.GetObjectRefs() {
e, exist, err := d.FindEntry(ctx, tblName, ref.Uid)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Tbl: %s, Key: %s", tblName, ref.Uid))
}
if !exist {
continue
}
entries = append(entries, e)
}
return entries, nil
}
type ValidateStatusObject struct {
FldValidators map[string]db.ValidatorFunc
}
func (v *ValidateStatusObject) Validate(ctx context.Context, pm interface{}, opts ...db.ValidateOpt) error {
e, ok := pm.(*StatusObject)
if !ok {
switch t := pm.(type) {
default:
return fmt.Errorf("Expected type *StatusObject got type %s", t)
}
}
if e == nil {
return nil
}
if fv, exists := v.FldValidators["conditions"]; exists {
vOpts := append(opts, db.WithValidateField("conditions"))
for idx, item := range e.GetConditions() {
vOpts := append(vOpts, db.WithValidateRepItem(idx))
if err := fv(ctx, item, vOpts...); err != nil {
return err
}
}
}
if fv, exists := v.FldValidators["metadata"]; exists {
vOpts := append(opts, db.WithValidateField("metadata"))
if err := fv(ctx, e.GetMetadata(), vOpts...); err != nil {
return err
}
}
if fv, exists := v.FldValidators["object_refs"]; exists {
vOpts := append(opts, db.WithValidateField("object_refs"))
for idx, item := range e.GetObjectRefs() {
vOpts := append(vOpts, db.WithValidateRepItem(idx))
if err := fv(ctx, item, vOpts...); err != nil {
return err
}
}
}
return nil
}
// Well-known symbol for default validator implementation
var DefaultStatusObjectValidator = func() *ValidateStatusObject {
v := &ValidateStatusObject{FldValidators: map[string]db.ValidatorFunc{}}
v.FldValidators["conditions"] = ves_io_schema.ConditionTypeValidator().Validate
return v
}()
func StatusObjectValidator() db.Validator {
return DefaultStatusObjectValidator
}
| {
obj = o.DeepCopy()
} |
sensor_test.go | package device
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/nherson/brewski/measurement"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
)
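// seed math/rand so each run generates a different set of test samples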
func init() {
rand.Seed(time.Now().UnixNano())
}
type mockReader struct {
samples []measurement.Sample
sampleReturnCount int
index int
}
func newMockReader(samples []measurement.Sample, sampleReturnCount int) *mockReader {
return &mockReader{
samples: samples,
index: 0,
sampleReturnCount: sampleReturnCount,
}
}
func (mr *mockReader) Name() string {
return "mockReader"
}
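// Read returns the next sampleReturnCount samples from the pre-generated list and advances the cursor.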
func (mr *mockReader) Read() ([]measurement.Sample, error) {
toReturn := []measurement.Sample{}
if len(mr.samples)-mr.index < mr.sampleReturnCount |
for i := 0; i < mr.sampleReturnCount; i++ {
toReturn = append(toReturn, mr.samples[i+mr.index])
}
mr.index += mr.sampleReturnCount
return toReturn, nil
}
type mockCallback struct {
t *testing.T
expectedSamples []measurement.Sample
index int
}
func newMockCallback(t *testing.T, samples []measurement.Sample) *mockCallback {
return &mockCallback{
t: t,
expectedSamples: samples,
index: 0,
}
}
func (mc *mockCallback) Handle(actualSample measurement.Sample) error {
if mc.index == len(mc.expectedSamples) {
assert.Fail(mc.t, "ran out of expected samples, but Handle() has been called")
}
expectedSample := mc.expectedSamples[mc.index]
// test sample device name equality
assert.Equal(mc.t, expectedSample.DeviceName(), actualSample.DeviceName())
// test sample tag equality
for expectedKey, expectedValue := range expectedSample.Tags() {
actualValue, found := actualSample.Tags()[expectedKey]
if !found {
assert.Fail(mc.t, fmt.Sprintf("missing expected key '%s'", expectedKey))
}
assert.Equal(mc.t, expectedValue, actualValue)
}
// test sample datapoint equality
assert.Equal(mc.t, len(expectedSample.Datapoints()), len(actualSample.Datapoints()))
for i, expectedDatapoint := range expectedSample.Datapoints() {
assert.Equal(mc.t, expectedDatapoint.Name(), actualSample.Datapoints()[i].Name())
assert.Equal(mc.t, expectedDatapoint.Value(), actualSample.Datapoints()[i].Value())
assert.Equal(mc.t, expectedDatapoint.Time(), actualSample.Datapoints()[i].Time())
}
mc.index++
return nil
}
// taken from https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
func randomString(n int) string {
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
s := make([]rune, n)
for i := range s {
s[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(s)
}
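// generateTestSamples builds numSamples samples one second apart, each with a random set of tags and datapoints.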
func generateTestSamples(deviceName string, numSamples int) []measurement.Sample {
t := time.Now()
samples := []measurement.Sample{}
for i := 0; i < numSamples; i++ {
sampleTime := t.Add(time.Duration(i) * time.Second)
sample := measurement.NewDeviceSample(deviceName)
// add a random number of tags
for j := 0; j < rand.Intn(5); j++ {
sample.AddTag(randomString(5), randomString(5))
}
// add a random number of datapoints
for k := 0; k < rand.Intn(5); k++ {
sample.AddDatapoint(randomString(5), rand.Float32(), sampleTime)
}
samples = append(samples, sample)
}
return samples
}
// The callback for this test makes sure that the sample received is what is expected.
// This is done by pre-generating a batch of samples for the "device" to emit, which is
// handed to both the "device" and the mock callback handler. When the handler receives
// a sample, it checks that every field matches the corresponding sample in its own
// copy of the batch.
// This is essentially an integration test for the Sensor implementation.
func TestSensor(t *testing.T) {
assert.True(t, true)
testSamples := generateTestSamples("testDevice", 100)
mr := newMockReader(testSamples, 5)
mc := newMockCallback(t, testSamples)
logger, _ := zap.NewProduction()
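// with a 1ms polling interval the sensor should drain all 100 samples (20 reads of 5) well before the 5s sleep below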
sensor := NewSensor(mr, time.Millisecond, logger)
sensor.SetCallback(mc)
sensor.Start()
time.Sleep(5 * time.Second)
sensor.Stop()
}
| {
return nil, fmt.Errorf("not enough samples to return")
} |
basic.rs | //! Tests auto-converted from "sass-spec/spec/libsass/parent-selector/basic.hrx"
#[test]
fn test() {
assert_eq!(
crate::rsass(
"foo bar {\ | \n\
\nfoo {\
\n bar baz & {\
\n bam: true;\
\n }\
\n}\
\n"
)
.unwrap(),
"baz foo bar {\
\n bam: true;\
\n}\
\nbar baz foo {\
\n bam: true;\
\n}\
\n"
);
} | \n baz & {\
\n bam: true;\
\n }\
\n}\ |
qt.py | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import threading
import base64
from functools import partial
import smtplib
import imaplib
import email
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.encoders import encode_base64
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QLineEdit)
from electrum_dash.plugins import BasePlugin, hook
from electrum_dash.paymentrequest import PaymentRequest
from electrum_dash.i18n import _
from electrum_dash_gui.qt.util import EnterButton, Buttons, CloseButton
from electrum_dash_gui.qt.util import OkButton, WindowModalDialog
class Processor(threading.Thread):
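# interval between IMAP mailbox polls, in seconds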
polling_interval = 5*60
def __init__(self, imap_server, username, password, callback):
threading.Thread.__init__(self)
self.daemon = True
self.username = username
self.password = password
self.imap_server = imap_server
self.on_receive = callback
def poll(self):
try:
self.M.select()
except:
return
typ, data = self.M.search(None, 'ALL')
for num in data[0].split():
typ, msg_data = self.M.fetch(num, '(RFC822)')
msg = email.message_from_string(msg_data[0][1])
p = msg.get_payload()
if not msg.is_multipart():
p = [p]
continue
for item in p:
if item.get_content_type() == "application/dash-paymentrequest":
pr_str = item.get_payload()
pr_str = base64.b64decode(pr_str)
self.on_receive(pr_str)
def run(self):
self.M = imaplib.IMAP4_SSL(self.imap_server)
self.M.login(self.username, self.password)
while True:
self.poll()
time.sleep(self.polling_interval)
self.M.close()
self.M.logout()
def send(self, recipient, message, payment_request):
msg = MIMEMultipart()
msg['Subject'] = message
msg['To'] = recipient
msg['From'] = self.username
part = MIMEBase('application', "dash-paymentrequest")
part.set_payload(payment_request)
encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="payreq.dash"')
msg.attach(part)
s = smtplib.SMTP_SSL(self.imap_server, timeout=2)
s.login(self.username, self.password)
s.sendmail(self.username, [recipient], msg.as_string())
s.quit()
class QEmailSignalObject(QObject):
email_new_invoice_signal = pyqtSignal()
class Plugin(BasePlugin):
def fullname(self):
return 'Email'
def description(self):
return _("Send and receive payment requests via email")
def is_available(self):
return True
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.imap_server = self.config.get('email_server', '')
self.username = self.config.get('email_username', '')
self.password = self.config.get('email_password', '')
if self.imap_server and self.username and self.password:
self.processor = Processor(self.imap_server, self.username, self.password, self.on_receive)
self.processor.start()
self.obj = QEmailSignalObject()
self.obj.email_new_invoice_signal.connect(self.new_invoice)
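# on_receive is called from the Processor thread; the Qt signal defers invoice handling to the GUI thread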
def on_receive(self, pr_str):
self.print_error('received payment request')
self.pr = PaymentRequest(pr_str)
self.obj.email_new_invoice_signal.emit()
def new_invoice(self):
self.parent.invoices.add(self.pr)
#window.update_invoices_list()
@hook
def receive_list_menu(self, menu, addr):
window = menu.parentWidget()
menu.addAction(_("Send via e-mail"), lambda: self.send(window, addr))
def send(self, window, addr):
|
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'), partial(self.settings_dialog, window))
def settings_dialog(self, window):
d = WindowModalDialog(window, _("Email settings"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Server hosting your email account')))
grid = QGridLayout()
vbox.addLayout(grid)
grid.addWidget(QLabel('Server (IMAP)'), 0, 0)
server_e = QLineEdit()
server_e.setText(self.imap_server)
grid.addWidget(server_e, 0, 1)
grid.addWidget(QLabel('Username'), 1, 0)
username_e = QLineEdit()
username_e.setText(self.username)
grid.addWidget(username_e, 1, 1)
grid.addWidget(QLabel('Password'), 2, 0)
password_e = QLineEdit()
password_e.setText(self.password)
grid.addWidget(password_e, 2, 1)
vbox.addStretch()
vbox.addLayout(Buttons(CloseButton(d), OkButton(d)))
if not d.exec_():
return
server = str(server_e.text())
self.config.set_key('email_server', server)
username = str(username_e.text())
self.config.set_key('email_username', username)
password = str(password_e.text())
self.config.set_key('email_password', password)
| from electrum_dash import paymentrequest
r = window.wallet.receive_requests.get(addr)
message = r.get('memo', '')
if r.get('signature'):
pr = paymentrequest.serialize_request(r)
else:
pr = paymentrequest.make_request(self.config, r)
if not pr:
return
recipient, ok = QtGui.QInputDialog.getText(window, 'Send request', 'Email invoice to:')
if not ok:
return
recipient = str(recipient)
payload = pr.SerializeToString()
self.print_error('sending mail to', recipient)
try:
self.processor.send(recipient, message, payload)
except BaseException as e:
window.show_message(str(e))
return
window.show_message(_('Request sent.')) |
derive_traits.rs | mod common;
#[cfg(feature = "e2e-tests")]
use common::*;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::consistency::Consistency;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::frame::TryFromRow;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::query::QueryValues;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::query_values;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::statement::StatementParamsBuilder;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::types::blob::Blob;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::IntoCdrsValue;
#[cfg(feature = "e2e-tests")]
use cdrs_tokio::{TryFromRow, TryFromUdt};
#[cfg(feature = "e2e-tests")]
use maplit::hashmap;
#[cfg(feature = "e2e-tests")]
use std::collections::HashMap;
#[cfg(feature = "e2e-tests")]
use std::str::FromStr;
#[cfg(feature = "e2e-tests")]
use time::PrimitiveDateTime;
#[cfg(feature = "e2e-tests")]
use uuid::Uuid;
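// Round-trips a row containing a simple UDT, a uuid and a blob and checks it reads back equal.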
#[tokio::test]
#[cfg(feature = "e2e-tests")]
async fn simple_udt() {
let create_type_cql = "CREATE TYPE IF NOT EXISTS cdrs_test.derive_udt (my_text text)";
let create_table_cql = "CREATE TABLE IF NOT EXISTS cdrs_test.test_derived_udt \
(my_key int PRIMARY KEY, my_udt derive_udt, my_uuid uuid, my_blob blob)";
let session = setup_multiple(&[create_type_cql, create_table_cql])
.await
.expect("setup");
#[derive(Clone, Debug, IntoCdrsValue, TryFromRow, PartialEq)]
struct RowStruct {
my_key: i32,
my_udt: MyUdt,
my_uuid: Uuid,
my_blob: Blob,
}
impl RowStruct {
fn into_query_values(self) -> QueryValues {
query_values!("my_key" => self.my_key, "my_udt" => self.my_udt, "my_uuid" => self.my_uuid, "my_blob" => self.my_blob)
}
}
#[derive(Debug, Clone, PartialEq, IntoCdrsValue, TryFromUdt)]
struct MyUdt {
pub my_text: String,
}
let row_struct = RowStruct {
my_key: 1i32,
my_udt: MyUdt {
my_text: "my_text".to_string(),
},
my_uuid: Uuid::from_str("bb16106a-10bc-4a07-baa3-126ffe208c43").unwrap(),
my_blob: Blob::new(vec![]),
};
let cql = "INSERT INTO cdrs_test.test_derived_udt \
(my_key, my_udt, my_uuid, my_blob) VALUES (?, ?, ?, ?)";
session
.query_with_values(cql, row_struct.clone().into_query_values())
.await
.expect("insert");
let cql = "SELECT * FROM cdrs_test.test_derived_udt";
let rows = session
.query(cql)
.await
.expect("query")
.response_body()
.expect("get body")
.into_rows()
.expect("into rows");
assert_eq!(rows.len(), 1);
for row in rows { | }
#[tokio::test]
#[cfg(feature = "e2e-tests")]
async fn nested_udt() {
let create_type1_cql = "CREATE TYPE IF NOT EXISTS cdrs_test.nested_inner_udt (my_text text)";
let create_type2_cql = "CREATE TYPE IF NOT EXISTS cdrs_test.nested_outer_udt \
(my_inner_udt frozen<nested_inner_udt>)";
let create_table_cql = "CREATE TABLE IF NOT EXISTS cdrs_test.test_nested_udt \
(my_key int PRIMARY KEY, my_outer_udt nested_outer_udt)";
let session = setup_multiple(&[create_type1_cql, create_type2_cql, create_table_cql])
.await
.expect("setup");
#[derive(Clone, Debug, IntoCdrsValue, TryFromRow, PartialEq)]
struct RowStruct {
my_key: i32,
my_outer_udt: MyOuterUdt,
}
impl RowStruct {
fn into_query_values(self) -> QueryValues {
query_values!("my_key" => self.my_key, "my_outer_udt" => self.my_outer_udt)
}
}
#[derive(Clone, Debug, IntoCdrsValue, TryFromUdt, PartialEq)]
struct MyInnerUdt {
pub my_text: String,
}
#[derive(Clone, Debug, IntoCdrsValue, TryFromUdt, PartialEq)]
struct MyOuterUdt {
pub my_inner_udt: MyInnerUdt,
}
let row_struct = RowStruct {
my_key: 0,
my_outer_udt: MyOuterUdt {
my_inner_udt: MyInnerUdt {
my_text: "my_text".to_string(),
},
},
};
let cql = "INSERT INTO cdrs_test.test_nested_udt \
(my_key, my_outer_udt) VALUES (?, ?)";
session
.query_with_values(cql, row_struct.clone().into_query_values())
.await
.expect("insert");
let cql = "SELECT * FROM cdrs_test.test_nested_udt";
let rows = session
.query(cql)
.await
.expect("query")
.response_body()
.expect("get body")
.into_rows()
.expect("into rows");
assert_eq!(rows.len(), 1);
for row in rows {
let my_row_struct: RowStruct = RowStruct::try_from_row(row).expect("into RowStruct");
assert_eq!(my_row_struct, row_struct);
}
}
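// Verifies that rows written before an `ALTER TYPE ... ADD` can still be decoded, with the new field read back as None.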
#[tokio::test]
#[cfg(feature = "e2e-tests")]
async fn alter_udt_add() {
let drop_table_cql = "DROP TABLE IF EXISTS cdrs_test.test_alter_udt_add";
let drop_type_cql = "DROP TYPE IF EXISTS cdrs_test.alter_udt_add_udt";
let create_type_cql = "CREATE TYPE cdrs_test.alter_udt_add_udt (my_text text)";
let create_table_cql = "CREATE TABLE IF NOT EXISTS cdrs_test.test_alter_udt_add \
(my_key int PRIMARY KEY, my_map frozen<map<text, alter_udt_add_udt>>)";
let session = setup_multiple(&[
drop_table_cql,
drop_type_cql,
create_type_cql,
create_table_cql,
])
.await
.expect("setup");
#[derive(Clone, Debug, IntoCdrsValue, TryFromRow, PartialEq)]
struct RowStruct {
my_key: i32,
my_map: HashMap<String, MyUdtA>,
}
impl RowStruct {
fn into_query_values(self) -> QueryValues {
query_values!("my_key" => self.my_key, "my_map" => self.my_map)
}
}
#[derive(Clone, Debug, IntoCdrsValue, TryFromUdt, PartialEq)]
struct MyUdtA {
pub my_text: String,
}
#[derive(Clone, Debug, IntoCdrsValue, TryFromRow, PartialEq)]
struct RowStructB {
my_key: i32,
my_map: HashMap<String, MyUdtB>,
}
#[derive(Clone, Debug, IntoCdrsValue, TryFromUdt, PartialEq)]
struct MyUdtB {
pub my_text: String,
pub my_timestamp: Option<PrimitiveDateTime>,
}
let row_struct = RowStruct {
my_key: 0,
my_map: hashmap! { "1".to_string() => MyUdtA {my_text: "my_text".to_string()} },
};
let cql = "INSERT INTO cdrs_test.test_alter_udt_add \
(my_key, my_map) VALUES (?, ?)";
session
.query_with_values(cql, row_struct.clone().into_query_values())
.await
.expect("insert");
let cql = "ALTER TYPE cdrs_test.alter_udt_add_udt ADD my_timestamp timestamp";
session.query(cql).await.expect("alter type");
let expected_nested_udt = MyUdtB {
my_text: "my_text".to_string(),
my_timestamp: None,
};
let cql = "SELECT * FROM cdrs_test.test_alter_udt_add";
let rows = session
.query(cql)
.await
.expect("query")
.response_body()
.expect("get body")
.into_rows()
.expect("into rows");
assert_eq!(rows.len(), 1);
for row in rows {
let altered_row: RowStructB = RowStructB::try_from_row(row).expect("into RowStructB");
assert_eq!(
altered_row.my_map,
hashmap! { "1".to_string() => expected_nested_udt.clone() }
);
}
}
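// Checks that a set<frozen<udt>> column can be extended via an `UPDATE ... SET col = col + ?` prepared statement.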
#[tokio::test]
#[cfg(feature = "e2e-tests")]
async fn update_list_with_udt() {
let drop_table_cql = "DROP TABLE IF EXISTS cdrs_test.update_list_with_udt";
let drop_type_cql = "DROP TYPE IF EXISTS cdrs_test.update_list_with_udt";
let create_type_cql = "CREATE TYPE cdrs_test.update_list_with_udt (id uuid,
text text)";
let create_table_cql = "CREATE TABLE IF NOT EXISTS cdrs_test.update_list_with_udt \
(id uuid PRIMARY KEY, udts_set set<frozen<cdrs_test.update_list_with_udt>>)";
let session = setup_multiple(&[
drop_table_cql,
drop_type_cql,
create_type_cql,
create_table_cql,
])
.await
.expect("setup");
#[derive(Clone, Debug, IntoCdrsValue, TryFromRow, PartialEq)]
struct RowStruct {
id: Uuid,
udts_set: Vec<MyUdt>,
}
impl RowStruct {
fn into_query_values(self) -> QueryValues {
query_values!("id" => self.id, "udts_set" => self.udts_set)
}
}
#[derive(Clone, Debug, IntoCdrsValue, TryFromUdt, PartialEq)]
struct MyUdt {
pub id: Uuid,
pub text: String,
}
let row_struct = RowStruct {
id: Uuid::parse_str("5bd8877a-e2b2-4d6f-aafd-c3f72a6964cf").expect("row id"),
udts_set: vec![MyUdt {
id: Uuid::parse_str("08f49fa5-934b-4aff-8a87-f3a3287296ba").expect("udt id"),
text: "text".into(),
}],
};
let cql = "INSERT INTO cdrs_test.update_list_with_udt \
(id, udts_set) VALUES (?, ?)";
session
.query_with_values(cql, row_struct.clone().into_query_values())
.await
.expect("insert");
let query = session
.prepare("UPDATE cdrs_test.update_list_with_udt SET udts_set = udts_set + ? WHERE id = ?")
.await
.expect("prepare query");
let params = StatementParamsBuilder::new()
.with_consistency(Consistency::Quorum)
.with_values(query_values!(
vec![MyUdt {
id: Uuid::parse_str("68f49fa5-934b-4aff-8a87-f3a32872a6ba").expect("udt id"),
text: "abc".into(),
}],
Uuid::parse_str("5bd8877a-e2b2-4d6f-aafd-c3f72a6964cf").unwrap()
));
session
.exec_with_params(&query, ¶ms.build())
.await
.expect("update set");
let expected_row_struct = RowStruct {
id: Uuid::parse_str("5bd8877a-e2b2-4d6f-aafd-c3f72a6964cf").expect("row id"),
udts_set: vec![
MyUdt {
id: Uuid::parse_str("08f49fa5-934b-4aff-8a87-f3a3287296ba").expect("udt id"),
text: "text".into(),
},
MyUdt {
id: Uuid::parse_str("68f49fa5-934b-4aff-8a87-f3a32872a6ba").expect("udt id"),
text: "abc".into(),
},
],
};
let cql = "SELECT * FROM cdrs_test.update_list_with_udt";
let rows = session
.query(cql)
.await
.expect("query")
.response_body()
.expect("get body")
.into_rows()
.expect("into rows");
assert_eq!(rows.len(), 1);
for row in rows {
let altered_row: RowStruct = RowStruct::try_from_row(row).expect("into RowStruct");
assert_eq!(altered_row, expected_row_struct);
}
} | let my_udt_row: RowStruct = RowStruct::try_from_row(row).expect("into RowStruct");
assert_eq!(my_udt_row, row_struct);
} |
lease.go | // Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"bytes"
"context"
"go.etcd.io/etcd/v3/clientv3"
)
type leasePrefix struct {
clientv3.Lease
pfx []byte
}
// NewLease wraps a Lease interface to filter for only keys with a prefix
// and remove that prefix when fetching attached keys through TimeToLive.
func | (l clientv3.Lease, prefix string) clientv3.Lease {
return &leasePrefix{l, []byte(prefix)}
}
func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
resp, err := l.Lease.TimeToLive(ctx, id, opts...)
if err != nil {
return nil, err
}
if len(resp.Keys) > 0 {
var outKeys [][]byte
for i := range resp.Keys {
if len(resp.Keys[i]) < len(l.pfx) {
// too short
continue
}
if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
// doesn't match prefix
continue
}
// strip prefix
outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
}
resp.Keys = outKeys
}
return resp, nil
}
| NewLease |
dino-details.e2e.ts | import { newE2EPage, E2EElement, E2EPage } from "@stencil/core/testing";
import { Dino } from "../../classes/Dino";
const tRex = new Dino("T-Rex");
tRex.description = "some description";
tRex.imageSrc = "imageSrc";
describe("dino-details", async () => {
let element: E2EElement;
let page: E2EPage;
beforeEach(async () => {
page = await newE2EPage({
html: `
<dino-details></dino-details>
`
});
element = await page.find("dino-details");
});
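// renders the component with a dinosaur prop and asserts on the resulting shadow DOM markup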
it("should work with a given dinosaur", async () => {
element.setProperty("dinosaur", tRex);
await page.waitForChanges();
expect(element.shadowRoot).toEqualHtml(
'<div><header><h2>T-Rex</h2></header><div class="dino-content"><img alt="T-Rex" src="imageSrc"><p>some description</p></div></div>'
);
});
it("should toggle on click", async () => {
element.setProperty("dinosaur", tRex); | await page.waitForChanges();
const headerMock = await page.find("dino-details >>> header");
await headerMock.click();
await page.waitForChanges();
const content = element.shadowRoot.querySelector("div.dino-content");
expect(content).toHaveClasses(["hide-content", "dino-content"]);
});
}); | |
configuration.py | import logging
from cached_property import threaded_cached_property
from .credentials import BaseCredentials
from .protocol import RetryPolicy, FailFast
from .transport import AUTH_TYPE_MAP
from .util import split_url
from .version import Version
log = logging.getLogger(__name__)
class Configuration:
"""
Assembles a connection protocol when autodiscover is not used.
If the server is not configured with autodiscover, the following should be sufficient:
config = Configuration(server='example.com', credentials=Credentials('MYWINDOMAIN\\myusername', 'topsecret'))
account = Account(primary_smtp_address='john@example.com', config=config)
You can also set the EWS service endpoint directly:
config = Configuration(service_endpoint='https://mail.example.com/EWS/Exchange.asmx', credentials=...)
If you know which authentication type the server uses, you add that as a hint:
config = Configuration(service_endpoint='https://example.com/EWS/Exchange.asmx', auth_type=NTLM, credentials=..)
If you want to use autodiscover, don't use a Configuration object. Instead, set up an account like this:
credentials = Credentials(username='MYWINDOMAIN\\myusername', password='topsecret')
account = Account(primary_smtp_address='john@example.com', credentials=credentials, autodiscover=True)
"""
def __init__(self, credentials=None, server=None, service_endpoint=None, auth_type=None, version=None,
retry_policy=None):
if not isinstance(credentials, (BaseCredentials, type(None))):
raise ValueError("'credentials' %r must be a Credentials instance" % credentials)
if server and service_endpoint:
raise AttributeError("Only one of 'server' or 'service_endpoint' must be provided")
if auth_type is not None and auth_type not in AUTH_TYPE_MAP:
raise ValueError("'auth_type' %r must be one of %s"
% (auth_type, ', '.join("'%s'" % k for k in sorted(AUTH_TYPE_MAP.keys()))))
if not retry_policy:
retry_policy = FailFast()
if not isinstance(version, (Version, type(None))):
raise ValueError("'version' %r must be a Version instance" % version)
if not isinstance(retry_policy, RetryPolicy):
raise ValueError("'retry_policy' %r must be a RetryPolicy instance" % retry_policy)
self._credentials = credentials
if server:
self.service_endpoint = 'https://%s/EWS/Exchange.asmx' % server
else:
self.service_endpoint = service_endpoint
self.auth_type = auth_type
self.version = version
self.retry_policy = retry_policy
@property
def credentials(self):
# Do not update credentials from this class. Instead, do it from Protocol
return self._credentials
@threaded_cached_property | return None
return split_url(self.service_endpoint)[1]
def __repr__(self):
return self.__class__.__name__ + '(%s)' % ', '.join('%s=%r' % (k, getattr(self, k)) for k in (
'credentials', 'service_endpoint', 'auth_type', 'version', 'retry_policy'
)) | def server(self):
if not self.service_endpoint: |
sessionActions.js | import * as types from '../constants/ActionTypes';
export const load = () => ({ type: types.LOAD });
export const login = () => ({ type: types.LOGIN });
export const loginSuccess = () => ({
type: types.LOGIN_SUCCESS
});
export const loginFailure = refresh_token => ({
type: types.LOGIN_FAILURE,
refresh_token
});
export const updateToken = refreshToken => ({
type: types.UPDATE_TOKEN, | export const updateTokenSuccess = access_token => ({
type: types.UPDATE_TOKEN_SUCCESS,
access_token
});
export const updateCurrentUser = user => ({
type: types.UPDATE_CURRENT_USER,
user
}); | refreshToken
}); |
chunk_store.go | package chunk
import (
"context"
"flag"
"fmt"
"net/http"
"sort"
"sync"
"time"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
"github.com/weaveworks/common/httpgrpc"
"github.com/cortexproject/cortex/pkg/chunk/cache"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/extract"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/cortexproject/cortex/pkg/util/validation"
)
var (
ErrMetricNameLabelMissing = errors.New("metric name label missing")
ErrParialDeleteChunkNoOverlap = errors.New("interval for partial deletion has no overlap with chunk interval")
)
var (
indexEntriesPerChunk = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "chunk_store_index_entries_per_chunk",
Help: "Number of entries written to storage per chunk.",
Buckets: prometheus.ExponentialBuckets(1, 2, 5),
})
cacheCorrupt = promauto.NewCounter(prometheus.CounterOpts{
Namespace: "cortex",
Name: "cache_corrupt_chunks_total",
Help: "Total count of corrupt chunks found in cache.",
})
)
// StoreConfig specifies config for a ChunkStore
type StoreConfig struct {
ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"`
WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config"`
CacheLookupsOlderThan model.Duration `yaml:"cache_lookups_older_than"`
// Limits query start time to be greater than now() - MaxLookBackPeriod, if set.
MaxLookBackPeriod model.Duration `yaml:"max_look_back_period"`
// Not visible in yaml because the setting shouldn't be common between ingesters and queriers.
// This exists in case we don't want to cache all the chunks but still want to take advantage of
// ingester chunk write deduplication. But for the queriers we need the full value. So when this option
// is set, use different caches for ingesters and queriers.
chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry
}
// RegisterFlags adds the flags required to config this to the given FlagSet
func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("store.chunks-cache.", "Cache config for chunks. ", f)
f.BoolVar(&cfg.chunkCacheStubs, "store.chunks-cache.cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.")
cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f)
f.Var(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", "Cache index entries older than this period. 0 to disable.")
f.Var(&cfg.MaxLookBackPeriod, "store.max-look-back-period", "Limit how long back data can be queried")
}
func (cfg *StoreConfig) Validate() error {
if err := cfg.ChunkCacheConfig.Validate(); err != nil {
return err
}
if err := cfg.WriteDedupeCacheConfig.Validate(); err != nil {
return err
}
return nil
}
type baseStore struct {
cfg StoreConfig
index IndexClient
chunks Client
schema BaseSchema
limits StoreLimits
*Fetcher
}
func newBaseStore(cfg StoreConfig, schema BaseSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (baseStore, error) |
// store implements Store
type store struct {
baseStore
schema StoreSchema
}
func newStore(cfg StoreConfig, schema StoreSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (Store, error) {
rs, err := newBaseStore(cfg, schema, index, chunks, limits, chunksCache)
if err != nil {
return nil, err
}
return &store{
baseStore: rs,
schema: schema,
}, nil
}
// Stop any background goroutines (ie in the cache.)
func (c *store) Stop() {
c.storage.Stop()
c.Fetcher.Stop()
c.index.Stop()
}
// Put implements ChunkStore
func (c *store) Put(ctx context.Context, chunks []Chunk) error {
for _, chunk := range chunks {
if err := c.PutOne(ctx, chunk.From, chunk.Through, chunk); err != nil {
return err
}
}
return nil
}
// PutOne implements ChunkStore
func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error {
log, ctx := spanlogger.New(ctx, "ChunkStore.PutOne")
chunks := []Chunk{chunk}
err := c.storage.PutChunks(ctx, chunks)
if err != nil {
return err
}
if cacheErr := c.writeBackCache(ctx, chunks); cacheErr != nil {
level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr)
}
writeReqs, err := c.calculateIndexEntries(chunk.UserID, from, through, chunk)
if err != nil {
return err
}
return c.index.BatchWrite(ctx, writeReqs)
}
// calculateIndexEntries creates a set of batched WriteRequests for all the chunks it is given.
func (c *store) calculateIndexEntries(userID string, from, through model.Time, chunk Chunk) (WriteBatch, error) {
seenIndexEntries := map[string]struct{}{}
metricName := chunk.Metric.Get(labels.MetricName)
if metricName == "" {
return nil, ErrMetricNameLabelMissing
}
entries, err := c.schema.GetWriteEntries(from, through, userID, metricName, chunk.Metric, chunk.ExternalKey())
if err != nil {
return nil, err
}
indexEntriesPerChunk.Observe(float64(len(entries)))
// Remove duplicate entries based on tableName:hashValue:rangeValue
result := c.index.NewWriteBatch()
for _, entry := range entries {
key := fmt.Sprintf("%s:%s:%x", entry.TableName, entry.HashValue, entry.RangeValue)
if _, ok := seenIndexEntries[key]; !ok {
seenIndexEntries[key] = struct{}{}
result.Add(entry.TableName, entry.HashValue, entry.RangeValue, entry.Value)
}
}
return result, nil
}
// Get implements Store
func (c *store) Get(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) {
log, ctx := spanlogger.New(ctx, "ChunkStore.Get")
defer log.Span.Finish()
level.Debug(log).Log("from", from, "through", through, "matchers", len(allMatchers))
// Validate the query is within reasonable bounds.
metricName, matchers, shortcut, err := c.validateQuery(ctx, userID, &from, &through, allMatchers)
if err != nil {
return nil, err
} else if shortcut {
return nil, nil
}
log.Span.SetTag("metric", metricName)
return c.getMetricNameChunks(ctx, userID, from, through, matchers, metricName)
}
func (c *store) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) {
return nil, nil, errors.New("not implemented")
}
// LabelValuesForMetricName retrieves all label values for a single label name and metric name.
func (c *baseStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName, labelName string) ([]string, error) {
log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues")
defer log.Span.Finish()
level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName)
shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
if err != nil {
return nil, err
} else if shortcut {
return nil, nil
}
queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName)
if err != nil {
return nil, err
}
entries, err := c.lookupEntriesByQueries(ctx, queries)
if err != nil {
return nil, err
}
var result UniqueStrings
for _, entry := range entries {
_, labelValue, _, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value)
if err != nil {
return nil, err
}
result.Add(string(labelValue))
}
return result.Strings(), nil
}
// LabelNamesForMetricName retrieves all label names for a metric name.
func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) {
log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName")
defer log.Span.Finish()
level.Debug(log).Log("from", from, "through", through, "metricName", metricName)
shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
if err != nil {
return nil, err
} else if shortcut {
return nil, nil
}
chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, nil, metricName)
if err != nil {
return nil, err
}
level.Debug(log).Log("msg", "Chunks in index", "chunks", len(chunks))
// Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint
filtered := filterChunksByTime(from, through, chunks)
filtered, keys := filterChunksByUniqueFingerprint(filtered)
level.Debug(log).Log("msg", "Chunks post filtering", "chunks", len(chunks))
// Now fetch the actual chunk data from Memcache / S3
allChunks, err := c.FetchChunks(ctx, filtered, keys)
if err != nil {
level.Error(log).Log("msg", "FetchChunks", "err", err)
return nil, err
}
return labelNamesFromChunks(allChunks), nil
}
func (c *baseStore) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) {
//nolint:ineffassign,staticcheck //Leaving ctx even though we don't currently use it, we want to make it available for when we might need it and hopefully will ensure us using the correct context at that time
log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange")
defer log.Span.Finish()
if *through < *from {
return false, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", through, from)
}
maxQueryLength := c.limits.MaxQueryLength(userID)
if maxQueryLength > 0 && (*through).Sub(*from) > maxQueryLength {
return false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(*from), maxQueryLength)
}
now := model.Now()
if from.After(now) {
// time-span start is in future ... regard as legal
level.Info(log).Log("msg", "whole timerange in future, yield empty resultset", "through", through, "from", from, "now", now)
return true, nil
}
if c.cfg.MaxLookBackPeriod != 0 {
oldestStartTime := model.Now().Add(-time.Duration(c.cfg.MaxLookBackPeriod))
if oldestStartTime.After(*from) {
*from = oldestStartTime
}
}
if through.After(now.Add(5 * time.Minute)) {
// time-span end is in future ... regard as legal
level.Info(log).Log("msg", "adjusting end timerange from future to now", "old_through", through, "new_through", now)
*through = now // Avoid processing future part - otherwise some schemas could fail with eg non-existent table gripes
}
return false, nil
}
func (c *baseStore) validateQuery(ctx context.Context, userID string, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) {
log, ctx := spanlogger.New(ctx, "store.validateQuery")
defer log.Span.Finish()
shortcut, err := c.validateQueryTimeRange(ctx, userID, from, through)
if err != nil {
return "", nil, false, err
}
if shortcut {
return "", nil, true, nil
}
// Check there is a metric name matcher of type equal,
metricNameMatcher, matchers, ok := extract.MetricNameMatcherFromMatchers(matchers)
if !ok || metricNameMatcher.Type != labels.MatchEqual {
return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, "query must contain metric name")
}
return metricNameMatcher.Value, matchers, false, nil
}
func (c *store) getMetricNameChunks(ctx context.Context, userID string, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) {
log, ctx := spanlogger.New(ctx, "ChunkStore.getMetricNameChunks")
defer log.Finish()
level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "matchers", len(allMatchers))
filters, matchers := util.SplitFiltersAndMatchers(allMatchers)
chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, matchers, metricName)
if err != nil {
return nil, err
}
level.Debug(log).Log("Chunks in index", len(chunks))
// Filter out chunks that are not in the selected time range.
filtered := filterChunksByTime(from, through, chunks)
level.Debug(log).Log("Chunks post filtering", len(chunks))
maxChunksPerQuery := c.limits.MaxChunksPerQuery(userID)
if maxChunksPerQuery > 0 && len(filtered) > maxChunksPerQuery {
err := httpgrpc.Errorf(http.StatusBadRequest, "Query %v fetched too many chunks (%d > %d)", allMatchers, len(filtered), maxChunksPerQuery)
level.Error(log).Log("err", err)
return nil, err
}
// Now fetch the actual chunk data from Memcache / S3
keys := keysFromChunks(filtered)
allChunks, err := c.FetchChunks(ctx, filtered, keys)
if err != nil {
return nil, promql.ErrStorage{Err: err}
}
// Filter out chunks based on the empty matchers in the query.
filteredChunks := filterChunksByMatchers(allChunks, filters)
return filteredChunks, nil
}
func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) {
log, ctx := spanlogger.New(ctx, "ChunkStore.lookupChunksByMetricName")
defer log.Finish()
// Just get chunks for metric if there are no matchers
if len(matchers) == 0 {
queries, err := c.schema.GetReadQueriesForMetric(from, through, userID, metricName)
if err != nil {
return nil, err
}
level.Debug(log).Log("queries", len(queries))
entries, err := c.lookupEntriesByQueries(ctx, queries)
if err != nil {
return nil, err
}
level.Debug(log).Log("entries", len(entries))
chunkIDs, err := c.parseIndexEntries(ctx, entries, nil)
if err != nil {
return nil, err
}
level.Debug(log).Log("chunkIDs", len(chunkIDs))
return c.convertChunkIDsToChunks(ctx, userID, chunkIDs)
}
// Otherwise get chunks which include other matchers
incomingChunkIDs := make(chan []string)
incomingErrors := make(chan error)
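// fan out one index lookup per matcher and intersect the resulting chunk ID sets below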
for _, matcher := range matchers {
go func(matcher *labels.Matcher) {
chunkIDs, err := c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, nil)
if err != nil {
incomingErrors <- err
} else {
incomingChunkIDs <- chunkIDs
}
}(matcher)
}
// Receive chunkSets from all matchers
var chunkIDs []string
var lastErr error
for i := 0; i < len(matchers); i++ {
select {
case incoming := <-incomingChunkIDs:
if chunkIDs == nil {
chunkIDs = incoming
} else {
chunkIDs = intersectStrings(chunkIDs, incoming)
}
case err := <-incomingErrors:
lastErr = err
}
}
if lastErr != nil {
return nil, lastErr
}
level.Debug(log).Log("msg", "post intersection", "chunkIDs", len(chunkIDs))
// Convert IndexEntry's into chunks
return c.convertChunkIDsToChunks(ctx, userID, chunkIDs)
}
func (c *baseStore) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]IndexQuery) []IndexQuery) ([]string, error) {
log, ctx := spanlogger.New(ctx, "Store.lookupIdsByMetricNameMatcher", "metricName", metricName, "matcher", matcher)
defer log.Span.Finish()
var err error
var queries []IndexQuery
var labelName string
if matcher == nil {
queries, err = c.schema.GetReadQueriesForMetric(from, through, userID, metricName)
} else if matcher.Type == labels.MatchEqual {
labelName = matcher.Name
queries, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, matcher.Value)
} else if matcher.Type == labels.MatchRegexp && len(FindSetMatches(matcher.Value)) > 0 {
set := FindSetMatches(matcher.Value)
for _, v := range set {
var qs []IndexQuery
qs, err = c.schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, matcher.Name, v)
if err != nil {
break
}
queries = append(queries, qs...)
}
} else {
labelName = matcher.Name
queries, err = c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, matcher.Name)
}
if err != nil {
return nil, err
}
level.Debug(log).Log("matcher", matcher, "queries", len(queries))
if filter != nil {
queries = filter(queries)
level.Debug(log).Log("matcher", matcher, "filteredQueries", len(queries))
}
entries, err := c.lookupEntriesByQueries(ctx, queries)
if e, ok := err.(CardinalityExceededError); ok {
e.MetricName = metricName
e.LabelName = labelName
return nil, e
} else if err != nil {
return nil, err
}
level.Debug(log).Log("matcher", matcher, "entries", len(entries))
ids, err := c.parseIndexEntries(ctx, entries, matcher)
if err != nil {
return nil, err
}
level.Debug(log).Log("matcher", matcher, "ids", len(ids))
return ids, nil
}
func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQuery) ([]IndexEntry, error) {
log, ctx := spanlogger.New(ctx, "store.lookupEntriesByQueries")
defer log.Span.Finish()
var lock sync.Mutex
var entries []IndexEntry
err := c.index.QueryPages(ctx, queries, func(query IndexQuery, resp ReadBatch) bool {
iter := resp.Iterator()
lock.Lock()
for iter.Next() {
entries = append(entries, IndexEntry{
TableName: query.TableName,
HashValue: query.HashValue,
RangeValue: iter.RangeValue(),
Value: iter.Value(),
})
}
lock.Unlock()
return true
})
if err != nil {
level.Error(util.WithContext(ctx, util.Logger)).Log("msg", "error querying storage", "err", err)
}
return entries, err
}
func (c *baseStore) parseIndexEntries(ctx context.Context, entries []IndexEntry, matcher *labels.Matcher) ([]string, error) {
result := make([]string, 0, len(entries))
for _, entry := range entries {
chunkKey, labelValue, _, err := parseChunkTimeRangeValue(entry.RangeValue, entry.Value)
if err != nil {
return nil, err
}
if matcher != nil && !matcher.Matches(string(labelValue)) {
continue
}
result = append(result, chunkKey)
}
// Return ids sorted and deduped because they will be merged with other sets.
sort.Strings(result)
result = uniqueStrings(result)
return result, nil
}
func (c *baseStore) convertChunkIDsToChunks(ctx context.Context, userID string, chunkIDs []string) ([]Chunk, error) {
chunkSet := make([]Chunk, 0, len(chunkIDs))
for _, chunkID := range chunkIDs {
chunk, err := ParseExternalKey(userID, chunkID)
if err != nil {
return nil, err
}
chunkSet = append(chunkSet, chunk)
}
return chunkSet, nil
}
func (c *store) DeleteChunk(ctx context.Context, from, through model.Time, userID, chunkID string, metric labels.Labels, partiallyDeletedInterval *model.Interval) error {
metricName := metric.Get(model.MetricNameLabel)
if metricName == "" {
return ErrMetricNameLabelMissing
}
chunkWriteEntries, err := c.schema.GetWriteEntries(from, through, userID, string(metricName), metric, chunkID)
if err != nil {
return errors.Wrapf(err, "when getting index entries to delete for chunkID=%s", chunkID)
}
return c.deleteChunk(ctx, userID, chunkID, metric, chunkWriteEntries, partiallyDeletedInterval, func(chunk Chunk) error {
return c.PutOne(ctx, chunk.From, chunk.Through, chunk)
})
}
func (c *baseStore) deleteChunk(ctx context.Context,
userID string,
chunkID string,
metric labels.Labels,
chunkWriteEntries []IndexEntry,
partiallyDeletedInterval *model.Interval,
putChunkFunc func(chunk Chunk) error) error {
metricName := metric.Get(model.MetricNameLabel)
if metricName == "" {
return ErrMetricNameLabelMissing
}
// if chunk is partially deleted, fetch it, slice non-deleted portion and put it to store before deleting original chunk
if partiallyDeletedInterval != nil {
err := c.reboundChunk(ctx, userID, chunkID, *partiallyDeletedInterval, putChunkFunc)
if err != nil {
return errors.Wrapf(err, "chunkID=%s", chunkID)
}
}
batch := c.index.NewWriteBatch()
for i := range chunkWriteEntries {
batch.Delete(chunkWriteEntries[i].TableName, chunkWriteEntries[i].HashValue, chunkWriteEntries[i].RangeValue)
}
err := c.index.BatchWrite(ctx, batch)
if err != nil {
return errors.Wrapf(err, "when deleting index entries for chunkID=%s", chunkID)
}
err = c.chunks.DeleteChunk(ctx, chunkID)
if err != nil {
if err == ErrStorageObjectNotFound {
return nil
}
return errors.Wrapf(err, "when deleting chunk from storage with chunkID=%s", chunkID)
}
return nil
}
func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, partiallyDeletedInterval model.Interval, putChunkFunc func(chunk Chunk) error) error {
chunk, err := ParseExternalKey(userID, chunkID)
if err != nil {
return errors.Wrap(err, "when parsing external key")
}
if !intervalsOverlap(model.Interval{Start: chunk.From, End: chunk.Through}, partiallyDeletedInterval) {
return ErrParialDeleteChunkNoOverlap
}
chunks, err := c.Fetcher.FetchChunks(ctx, []Chunk{chunk}, []string{chunkID})
if err != nil {
if err == ErrStorageObjectNotFound {
return nil
}
return errors.Wrap(err, "when fetching chunk from storage for slicing")
}
if len(chunks) != 1 {
return fmt.Errorf("expected to get 1 chunk from storage got %d instead", len(chunks))
}
chunk = chunks[0]
var newChunks []*Chunk
if partiallyDeletedInterval.Start > chunk.From {
newChunk, err := chunk.Slice(chunk.From, partiallyDeletedInterval.Start-1)
if err != nil && err != ErrSliceNoDataInRange {
return errors.Wrapf(err, "when slicing chunk for interval %d - %d", chunk.From, partiallyDeletedInterval.Start-1)
}
if newChunk != nil {
newChunks = append(newChunks, newChunk)
}
}
if partiallyDeletedInterval.End < chunk.Through {
newChunk, err := chunk.Slice(partiallyDeletedInterval.End+1, chunk.Through)
if err != nil && err != ErrSliceNoDataInRange {
return errors.Wrapf(err, "when slicing chunk for interval %d - %d", partiallyDeletedInterval.End+1, chunk.Through)
}
if newChunk != nil {
newChunks = append(newChunks, newChunk)
}
}
for _, newChunk := range newChunks {
if err := newChunk.Encode(); err != nil {
return errors.Wrapf(err, "when encoding new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through)
}
err = putChunkFunc(*newChunk)
if err != nil {
return errors.Wrapf(err, "when putting new chunk formed after slicing for interval %d - %d", newChunk.From, newChunk.Through)
}
}
return nil
}
func (c *store) DeleteSeriesIDs(ctx context.Context, from, through model.Time, userID string, metric labels.Labels) error {
// SeriesID is something which is only used in SeriesStore so we need not do anything here
return nil
}
| {
fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks)
if err != nil {
return baseStore{}, err
}
return baseStore{
cfg: cfg,
index: index,
chunks: chunks,
schema: schema,
limits: limits,
Fetcher: fetcher,
}, nil
}
velocity_xyz.rs | use crate::precision::PrecisionExt;
use core::fmt;
/// Contains the X, Y and Z components of the GNSS/INS velocity in m/s
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct VelocityXYZ<T: PrecisionExt> {
pub x: T,
pub y: T,
pub z: T,
}
impl<T: PrecisionExt> fmt::Display for VelocityXYZ<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "X({:.3}), Y({:.3}), Z({:.3})", self.x, self.y, self.z)
}
}
precision_float32_3field_wire_impl!(VelocityXYZ, x, y, z);
precision_float64_3field_wire_impl!(VelocityXYZ, x, y, z);
precision_fp1220_3field_wire_impl!(VelocityXYZ, x, y, z);
precision_fp1632_3field_wire_impl!(VelocityXYZ, x, y, z);
basic_metric_lister.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provider
import (
"context"
"fmt"
"time"
pmodel "github.com/prometheus/common/model"
"k8s.io/klog/v2"
prom "github.com/kubernetes-sigs/prometheus-adapter/pkg/client"
"github.com/kubernetes-sigs/prometheus-adapter/pkg/naming"
)
// Runnable represents something that can be run until told to stop.
type Runnable interface {
// Run runs the runnable forever.
Run()
// RunUntil runs the runnable until the given channel is closed.
RunUntil(stopChan <-chan struct{})
}
// A MetricLister provides a window into all of the metrics that are available within a given
// Prometheus instance, classified as either Custom or External metrics, but presented generically
// so that it can manage both types simultaneously.
type MetricLister interface {
ListAllMetrics() (MetricUpdateResult, error)
}
// A MetricListerWithNotification is a MetricLister that has the ability to notify listeners
// when new metric data is available.
type MetricListerWithNotification interface {
MetricLister
Runnable
// AddNotificationReceiver registers a callback to be invoked when new metric data is available.
AddNotificationReceiver(MetricUpdateCallback)
// UpdateNow forces an immediate refresh from the source data. Primarily for test purposes.
UpdateNow()
}
type basicMetricLister struct {
promClient prom.Client
namers []naming.MetricNamer
lookback time.Duration
}
// NewBasicMetricLister creates a MetricLister that is capable of interacting directly with Prometheus to list metrics.
func NewBasicMetricLister(promClient prom.Client, namers []naming.MetricNamer, lookback time.Duration) MetricLister {
lister := basicMetricLister{
promClient: promClient,
namers: namers,
lookback: lookback,
}
return &lister
}
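// A minimal usage sketch (hypothetical caller, not part of this file), assuming a
// configured prom.Client and a slice of naming.MetricNamer converters are already
// in scope:
//
//	lister := NewBasicMetricLister(promClient, namers, 10*time.Minute)
//	result, err := lister.ListAllMetrics()
//	if err != nil {
//		klog.Errorf("unable to list metrics: %v", err)
//	}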
type selectorSeries struct {
selector prom.Selector
series []prom.Series
}
func (l *basicMetricLister) ListAllMetrics() (MetricUpdateResult, error) {
result := MetricUpdateResult{
series: make([][]prom.Series, 0),
namers: make([]naming.MetricNamer, 0),
}
startTime := pmodel.Now().Add(-1 * l.lookback)
// these can take a while on large clusters, so launch in parallel
// and don't duplicate
selectors := make(map[prom.Selector]struct{})
selectorSeriesChan := make(chan selectorSeries, len(l.namers))
errs := make(chan error, len(l.namers))
for _, converter := range l.namers {
sel := converter.Selector()
if _, ok := selectors[sel]; ok {
errs <- nil
selectorSeriesChan <- selectorSeries{}
continue
}
selectors[sel] = struct{}{}
go func() {
series, err := l.promClient.Series(context.TODO(), pmodel.Interval{startTime, 0}, sel)
if err != nil {
errs <- fmt.Errorf("unable to fetch metrics for query %q: %v", sel, err)
return
}
errs <- nil
// Push into the channel: "this selector produced these series"
selectorSeriesChan <- selectorSeries{
selector: sel,
series: series,
}
}()
}
// don't do duplicate queries when it's just the matchers that change
seriesCacheByQuery := make(map[prom.Selector][]prom.Series)
// iterate through, blocking until we've got all results
// We know that, from above, we should have pushed one item into the channel
// for each converter. So here, we'll assume that we should receive one item per converter.
for range l.namers {
if err := <-errs; err != nil {
return result, fmt.Errorf("unable to update list of all metrics: %v", err)
}
// Receive from the channel: "this selector produced these series"
// We stuff that into this map so that we can collect the data as it arrives
// and then, once we've received it all, we can process it below.
if ss := <-selectorSeriesChan; ss.series != nil {
seriesCacheByQuery[ss.selector] = ss.series
}
}
close(errs)
// Now that we've collected all of the results into `seriesCacheByQuery`
// we can start processing them.
newSeries := make([][]prom.Series, len(l.namers))
for i, namer := range l.namers {
series, cached := seriesCacheByQuery[namer.Selector()]
if !cached {
return result, fmt.Errorf("unable to update list of all metrics: no metrics retrieved for query %q", namer.Selector())
}
// Because converters provide a "post-filtering" option, it's not enough to
// simply take all the series that were produced. We need to further filter them.
newSeries[i] = namer.FilterSeries(series)
}
klog.V(10).Infof("Set available metric list from Prometheus to: %v", newSeries)
result.series = newSeries
result.namers = l.namers
return result, nil
}
// MetricUpdateResult represents the output of a periodic inspection of metrics found to be
// available in Prometheus.
// It includes both the series data the Prometheus exposed, as well as the configurational
// object that led to their discovery.
type MetricUpdateResult struct {
series [][]prom.Series
namers []naming.MetricNamer
}
// MetricUpdateCallback is a function signature for receiving periodic updates about
// available metrics.
type MetricUpdateCallback func(MetricUpdateResult)
match.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package language
import (
"errors"
"strings"
"golang.org/x/text/internal/language"
)
// A MatchOption configures a Matcher.
type MatchOption func(*matcher)
// PreferSameScript will, in the absence of a match, result in the first
// preferred tag with the same script as a supported tag to match this supported
// tag. The default is currently true, but this may change in the future.
func PreferSameScript(preferSame bool) MatchOption {
return func(m *matcher) { m.preferSameScript = preferSame }
}
// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
// There doesn't seem to be too much need for multiple types.
// Making it a concrete type allows MatchStrings to be a method, which will
// improve its discoverability.
// MatchStrings parses and matches the given strings until one of them matches
// the language in the Matcher. A string may be an Accept-Language header as
// handled by ParseAcceptLanguage. The default language is returned if no
// other language matched.
func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
for _, accept := range lang {
desired, _, err := ParseAcceptLanguage(accept)
if err != nil {
continue
}
if tag, index, conf := m.Match(desired...); conf != No {
return tag, index
}
}
tag, index, _ = m.Match()
return
}
// Matcher is the interface that wraps the Match method.
//
// Match returns the best match for any of the given tags, along with
// a unique index associated with the returned tag and a confidence
// score.
type Matcher interface {
Match(t ...Tag) (tag Tag, index int, c Confidence)
}
// Comprehends reports the confidence score for a speaker of a given language
// to being able to comprehend the written form of an alternative language.
func Comprehends(speaker, alternative Tag) Confidence {
_, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
return c
}
// NewMatcher returns a Matcher that matches an ordered list of preferred tags
// against a list of supported tags based on written intelligibility, closeness
// of dialect, equivalence of subtags and various other rules. It is initialized
// with the list of supported tags. The first element is used as the default
// value in case no match is found.
//
// Its Match method matches the first of the given Tags to reach a certain
// confidence threshold. The tags passed to Match should therefore be specified
// in order of preference. Extensions are ignored for matching.
//
// The index returned by the Match method corresponds to the index of the
// matched tag in t, but is augmented with the Unicode extension ('u') of the
// corresponding preferred tag. This allows user locale options to be passed
// transparently.
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
return newMatcher(t, options)
}
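// A minimal usage sketch (hypothetical calling code, not part of this package; the
// HTTP handler and the particular supported set are assumptions for illustration):
//
//	var supported = []language.Tag{
//		language.AmericanEnglish, // en-US: the first tag is the default/fallback
//		language.German,          // de
//		language.Japanese,        // ja
//	}
//	var matcher = language.NewMatcher(supported)
//
//	func handle(w http.ResponseWriter, r *http.Request) {
//		accept := r.Header.Get("Accept-Language")
//		tag, _ := language.MatchStrings(matcher, r.FormValue("lang"), accept)
//		fmt.Fprintln(w, "serving locale:", tag)
//	}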
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
var tt language.Tag
match, w, c := m.getBest(want...)
if match != nil {
tt, index = match.tag, match.index
} else {
// TODO: this should be an option
tt = m.default_.tag
if m.preferSameScript {
outer:
for _, w := range want {
script, _ := w.Script()
if script.scriptID == 0 {
// Don't do anything if there is no script, such as with
// private subtags.
continue
}
for i, h := range m.supported {
if script.scriptID == h.maxScript {
tt, index = h.tag, i
break outer
}
}
}
}
// TODO: select first language tag based on script.
}
if w.RegionID != tt.RegionID && w.RegionID != 0 {
if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) {
tt.RegionID = w.RegionID
tt.RemakeString()
} else if r := w.RegionID.String(); len(r) == 2 {
// TODO: also filter macro and deprecated.
tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz")
}
}
// Copy options from the user-provided tag into the result tag. This is hard
// to do after the fact, so we do it here.
// TODO: add in alternative variants to -u-va-.
// TODO: add preferred region to -u-rg-.
if e := w.Extensions(); len(e) > 0 {
b := language.Builder{}
b.SetTag(tt)
for _, e := range e {
b.AddExt(e)
}
tt = b.Make()
}
return makeTag(tt), index, c
}
// ErrMissingLikelyTagsData indicates no information was available
// to compute likely values of missing tags.
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
// func (t *Tag) setTagsFrom(id Tag) {
// t.LangID = id.LangID
// t.ScriptID = id.ScriptID
// t.RegionID = id.RegionID
// }
// Tag Matching
// CLDR defines an algorithm for finding the best match between two sets of language
// tags. The basic algorithm defines how to score a possible match and then find
// the match with the best score
// (see https://www.unicode.org/reports/tr35/#LanguageMatching).
// Using scoring has several disadvantages. The scoring obfuscates the importance of
// the various factors considered, making the algorithm harder to understand. Using
// scoring also requires the full score to be computed for each pair of tags.
//
// We will use a different algorithm which aims to have the following properties:
// - clarity on the precedence of the various selection factors, and
// - improved performance by allowing early termination of a comparison.
//
// Matching algorithm (overview)
// Input:
// - supported: a set of supported tags
// - default: the default tag to return in case there is no match
// - desired: list of desired tags, ordered by preference, starting with
// the most-preferred.
//
// Algorithm:
// 1) Set the best match to the lowest confidence level
// 2) For each tag in "desired":
// a) For each tag in "supported":
// 1) compute the match between the two tags.
// 2) if the match is better than the previous best match, replace it
// with the new match. (see next section)
// b) if the current best match is Exact and pin is true the result will be
// frozen to the language found thusfar, although better matches may
// still be found for the same language.
// 3) If the best match so far is below a certain threshold, return "default".
//
// Ranking:
// We use two phases to determine whether one pair of tags are a better match
// than another pair of tags. First, we determine a rough confidence level. If the
// levels are different, the one with the highest confidence wins.
// Second, if the rough confidence levels are identical, we use a set of tie-breaker
// rules.
//
// The confidence level of matching a pair of tags is determined by finding the
// lowest confidence level of any matches of the corresponding subtags (the
// result is deemed as good as its weakest link).
// We define the following levels:
// Exact - An exact match of a subtag, before adding likely subtags.
// MaxExact - An exact match of a subtag, after adding likely subtags.
// [See Note 2].
// High - High level of mutual intelligibility between different subtag
// variants.
// Low - Low level of mutual intelligibility between different subtag
// variants.
// No - No mutual intelligibility.
//
// The following levels can occur for each type of subtag:
// Base: Exact, MaxExact, High, Low, No
// Script: Exact, MaxExact [see Note 3], Low, No
// Region: Exact, MaxExact, High
// Variant: Exact, High
// Private: Exact, No
//
// Any result with a confidence level of Low or higher is deemed a possible match.
// Once a desired tag matches any of the supported tags with a level of MaxExact
// or higher, the next desired tag is not considered (see Step 2.b).
// Note that CLDR provides languageMatching data that defines close equivalence
// classes for base languages, scripts and regions.
//
// Tie-breaking
// If we get the same confidence level for two matches, we apply a sequence of
// tie-breaking rules. The first that succeeds defines the result. The rules are
// applied in the following order.
// 1) Original language was defined and was identical.
// 2) Original region was defined and was identical.
// 3) Distance between two maximized regions was the smallest.
// 4) Original script was defined and was identical.
// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
// If there is still no winner after these rules are applied, the first match
// found wins.
//
// Notes:
// [2] In practice, as matching of Exact is done in a separate phase from
// matching the other levels, we reuse the Exact level to mean MaxExact in
// the second phase. As a consequence, we only need the levels defined by
// the Confidence type. The MaxExact confidence level is mapped to High in
// the public API.
// [3] We do not differentiate between maximized script values that were derived
// from suppressScript versus most likely tag data. We determined that in
// ranking the two, one ranks just after the other. Moreover, the two cannot
// occur concurrently. As a consequence, they are identical for practical
// purposes.
// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
// the MaxExact level to allow iw vs he to still be a closer match than
// en-AU vs en-US, for example.
// [5] In CLDR a locale inherits fields that are unspecified for this locale
// from its parent. Therefore, if a locale is a parent of another locale,
// it is a strong measure for closeness, especially when no other tie
// breaker rule applies. One could also argue it is inconsistent, for
// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
// though its parent is pt-PT according to the inheritance rules.
//
// Implementation Details:
// There are several performance considerations worth pointing out. Most notably,
// we preprocess as much as possible (within reason) at the time of creation of a
// matcher. This includes:
// - creating a per-language map, which includes data for the raw base language
// and its canonicalized variant (if applicable),
// - expanding entries for the equivalence classes defined in CLDR's
// languageMatch data.
// The per-language map ensures that typically only a very small number of tags
// need to be considered. The pre-expansion of canonicalized subtags and
// equivalence classes reduces the amount of map lookups that need to be done at
// runtime.
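// Illustrative walk-through (informal, not a verified trace against the CLDR tables):
// with supported = [en-US, de, ja] and desired = [de-CH], de-CH maximizes to
// de-Latn-CH and the supported de to de-Latn-DE; base language and script match while
// only the region differs, so this pair outranks any en-US or ja candidate and de is
// returned with a high confidence.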
// matcher keeps a set of supported language tags, indexed by language.
type matcher struct {
default_ *haveTag
supported []*haveTag
index map[language.Language]*matchHeader
passSettings bool
preferSameScript bool
}
// matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language.
type matchHeader struct {
haveTags []*haveTag
original bool
}
// haveTag holds a supported Tag and its maximized script and region. The maximized
// or canonicalized language is not stored as it is not needed during matching.
type haveTag struct {
tag language.Tag
// index of this tag in the original list of supported tags.
index int
// conf is the maximum confidence that can result from matching this haveTag.
// When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
conf Confidence
// Maximized region and script.
maxRegion language.Region
maxScript language.Script
// altScript may be checked as an alternative match to maxScript. If altScript
// matches, the confidence level for this match is Low. Theoretically there
// could be multiple alternative scripts. This does not occur in practice.
altScript language.Script
// nextMax is the index of the next haveTag with the same maximized tags.
nextMax uint16
}
func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) {
max := tag
if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 {
max, _ = canonicalize(All, max)
max, _ = max.Maximize()
max.RemakeString()
}
return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID
}
// altScript returns an alternative script that may match the given script with
// a low confidence. At the moment, the langMatch data allows for at most one
// script to map to another and we rely on this to keep the code simple.
func altScript(l language.Language, s language.Script) language.Script {
for _, alt := range matchScript {
// TODO: also match cases where language is not the same.
if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&
language.Script(alt.haveScript) == s {
return language.Script(alt.wantScript)
}
}
return 0
}
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
// Tags that have the same maximized values are linked by index.
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
h.original = h.original || exact
// Don't add new exact matches.
for _, v := range h.haveTags {
if equalsRest(v.tag, n.tag) {
return
}
}
// Allow duplicate maximized tags, but create a linked list to allow quickly
// comparing the equivalents and bail out.
for i, v := range h.haveTags {
if v.maxScript == n.maxScript &&
v.maxRegion == n.maxRegion &&
v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() {
for h.haveTags[i].nextMax != 0 {
i = int(h.haveTags[i].nextMax)
}
h.haveTags[i].nextMax = uint16(len(h.haveTags))
break
}
}
h.haveTags = append(h.haveTags, &n)
}
// header returns the matchHeader for the given language. It creates one if
// it doesn't already exist.
func (m *matcher) header(l language.Language) *matchHeader {
if h := m.index[l]; h != nil {
return h
}
h := &matchHeader{}
m.index[l] = h
return h
}
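// toConf maps a CLDR languageMatch distance to a coarse Confidence bucket: a distance
// of 10 or less maps to High, anything below 30 maps to Low, and larger distances are
// treated as No mutual intelligibility.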
func toConf(d uint8) Confidence {
if d <= 10 {
return High
}
	if d < 30 {
		return Low
	}
return No
}
// newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes
// for a given tag.
func newMatcher(supported []Tag, options []MatchOption) *matcher {
m := &matcher{
index: make(map[language.Language]*matchHeader),
preferSameScript: true,
}
for _, o := range options {
o(m)
}
if len(supported) == 0 {
m.default_ = &haveTag{}
return m
}
// Add supported languages to the index. Add exact matches first to give
// them precedence.
for i, tag := range supported {
tt := tag.tag()
pair, _ := makeHaveTag(tt, i)
m.header(tt.LangID).addIfNew(pair, true)
m.supported = append(m.supported, &pair)
}
m.default_ = m.header(supported[0].lang()).haveTags[0]
// Keep these in two different loops to support the case that two equivalent
// languages are distinguished, such as iw and he.
for i, tag := range supported {
tt := tag.tag()
pair, max := makeHaveTag(tt, i)
if max != tt.LangID {
m.header(max).addIfNew(pair, true)
}
}
// update is used to add indexes in the map for equivalent languages.
// update will only add entries to original indexes, thus not computing any
// transitive relations.
update := func(want, have uint16, conf Confidence) {
if hh := m.index[language.Language(have)]; hh != nil {
if !hh.original {
return
}
hw := m.header(language.Language(want))
for _, ht := range hh.haveTags {
v := *ht
if conf < v.conf {
v.conf = conf
}
v.nextMax = 0 // this value needs to be recomputed
if v.altScript != 0 {
v.altScript = altScript(language.Language(want), v.maxScript)
}
hw.addIfNew(v, conf == Exact && hh.original)
}
}
}
// Add entries for languages with mutual intelligibility as defined by CLDR's
// languageMatch data.
for _, ml := range matchLang {
update(ml.want, ml.have, toConf(ml.distance))
if !ml.oneway {
update(ml.have, ml.want, toConf(ml.distance))
}
}
// Add entries for possible canonicalizations. This is an optimization to
// ensure that only one map lookup needs to be done at runtime per desired tag.
// First we match deprecated equivalents. If they are perfect equivalents
// (their canonicalization simply substitutes a different language code, but
// nothing else), the match confidence is Exact, otherwise it is High.
for i, lm := range language.AliasMap {
		// If deprecated codes match and there is no fiddling with the script
		// or region, we consider it an exact match.
conf := Exact
if language.AliasTypes[i] != language.Macro {
if !isExactEquivalent(language.Language(lm.From)) {
conf = High
}
update(lm.To, lm.From, conf)
}
update(lm.From, lm.To, conf)
}
return m
}
// getBest gets the best matching tag in m for any of the given tags, taking into
// account the order of preference of the given tags.
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) {
best := bestMatch{}
for i, ww := range want {
w := ww.tag()
var max language.Tag
// Check for exact match first.
h := m.index[w.LangID]
if w.LangID != 0 {
if h == nil {
continue
}
// Base language is defined.
max, _ = canonicalize(Legacy|Deprecated|Macro, w)
// A region that is added through canonicalization is stronger than
// a maximized region: set it in the original (e.g. mo -> ro-MD).
if w.RegionID != max.RegionID {
w.RegionID = max.RegionID
}
// TODO: should we do the same for scripts?
// See test case: en, sr, nl ; sh ; sr
max, _ = max.Maximize()
} else {
// Base language is not defined.
if h != nil {
for i := range h.haveTags {
have := h.haveTags[i]
if equalsRest(have.tag, w) {
return have, w, Exact
}
}
}
if w.ScriptID == 0 && w.RegionID == 0 {
// We skip all tags matching und for approximate matching, including
// private tags.
continue
}
max, _ = w.Maximize()
if h = m.index[max.LangID]; h == nil {
continue
}
}
pin := true
for _, t := range want[i+1:] {
if w.LangID == t.lang() {
pin = false
break
}
}
// Check for match based on maximized tag.
for i := range h.haveTags {
have := h.haveTags[i]
best.update(have, w, max.ScriptID, max.RegionID, pin)
if best.conf == Exact {
for have.nextMax != 0 {
have = h.haveTags[have.nextMax]
best.update(have, w, max.ScriptID, max.RegionID, pin)
}
return best.have, best.want, best.conf
}
}
}
if best.conf <= No {
if len(want) != 0 {
return nil, want[0].tag(), No
}
return nil, language.Tag{}, No
}
return best.have, best.want, best.conf
}
// bestMatch accumulates the best match so far.
type bestMatch struct {
have *haveTag
want language.Tag
conf Confidence
pinnedRegion language.Region
pinLanguage bool
sameRegionGroup bool
// Cached results from applying tie-breaking rules.
origLang bool
origReg bool
paradigmReg bool
regGroupDist uint8
origScript bool
}
// update updates the existing best match if the new pair is considered to be a
// better match. To determine if the given pair is a better match, it first
// computes the rough confidence level. If this surpasses the current match, it
// will replace it and update the tie-breaker rule cache. If there is a tie, it
// proceeds with applying a series of tie-breaker rules. If there is no
// conclusive winner after applying the tie-breaker rules, it leaves the current
// match as the preferred match.
//
// If pin is true and have and tag are a strong match, it will henceforth only
// consider matches for this language. This corresponds to the notion that most
// users have a strong preference for the first defined language. A user can
// still prefer a second language over a dialect of the preferred language by
// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
// be false.
func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) {
// Bail if the maximum attainable confidence is below that of the current best match.
c := have.conf
if c < m.conf {
return
}
// Don't change the language once we already have found an exact match.
if m.pinLanguage && tag.LangID != m.want.LangID {
return
}
// Pin the region group if we are comparing tags for the same language.
if tag.LangID == m.want.LangID && m.sameRegionGroup {
_, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID)
if !sameGroup {
return
}
}
if c == Exact && have.maxScript == maxScript {
// If there is another language and then another entry of this language,
// don't pin anything, otherwise pin the language.
m.pinLanguage = pin
}
if equalsRest(have.tag, tag) {
} else if have.maxScript != maxScript {
// There is usually very little comprehension between different scripts.
// In a few cases there may still be Low comprehension. This possibility
// is pre-computed and stored in have.altScript.
if Low < m.conf || have.altScript != maxScript {
return
}
c = Low
} else if have.maxRegion != maxRegion {
if High < c {
// There is usually a small difference between languages across regions.
c = High
}
}
// We store the results of the computations of the tie-breaker rules along
// with the best match. There is no need to do the checks once we determine
// we have a winner, but we do still need to do the tie-breaker computations.
// We use "beaten" to keep track if we still need to do the checks.
beaten := false // true if the new pair defeats the current one.
if c != m.conf {
if c < m.conf {
return
}
beaten = true
}
// Tie-breaker rules:
// We prefer if the pre-maximized language was specified and identical.
origLang := have.tag.LangID == tag.LangID && tag.LangID != 0
if !beaten && m.origLang != origLang {
if m.origLang {
return
}
beaten = true
}
// We prefer if the pre-maximized region was specified and identical.
origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0
if !beaten && m.origReg != origReg {
if m.origReg {
return
}
beaten = true
}
regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID)
if !beaten && m.regGroupDist != regGroupDist {
if regGroupDist > m.regGroupDist {
return
}
beaten = true
}
paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion)
if !beaten && m.paradigmReg != paradigmReg {
if !paradigmReg {
return
}
beaten = true
}
// Next we prefer if the pre-maximized script was specified and identical.
origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0
if !beaten && m.origScript != origScript {
if m.origScript {
return
}
beaten = true
}
// Update m to the newly found best match.
if beaten {
m.have = have
m.want = tag
m.conf = c
m.pinnedRegion = maxRegion
m.sameRegionGroup = sameGroup
m.origLang = origLang
m.origReg = origReg
m.paradigmReg = paradigmReg
m.origScript = origScript
m.regGroupDist = regGroupDist
}
}
func isParadigmLocale(lang language.Language, r language.Region) bool {
for _, e := range paradigmLocales {
if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) {
return true
}
}
return false
}
// regionGroupDist computes the distance between two regions based on their
// CLDR grouping.
func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) {
const defaultDistance = 4
aGroup := uint(regionToGroups[a]) << 1
bGroup := uint(regionToGroups[b]) << 1
for _, ri := range matchRegion {
if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) {
group := uint(1 << (ri.group &^ 0x80))
if 0x80&ri.group == 0 {
if aGroup&bGroup&group != 0 { // Both regions are in the group.
return ri.distance, ri.distance == defaultDistance
}
} else {
if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
return ri.distance, ri.distance == defaultDistance
}
}
}
}
return defaultDistance, true
}
// equalsRest compares everything except the language.
func equalsRest(a, b language.Tag) bool {
// TODO: don't include extensions in this comparison. To do this efficiently,
// though, we should handle private tags separately.
return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags()
}
// isExactEquivalent returns true if canonicalizing the language will not alter
// the script or region of a tag.
func isExactEquivalent(l language.Language) bool {
for _, o := range notEquivalent {
if o == l {
return false
}
}
return true
}
var notEquivalent []language.Language
func init() {
// Create a list of all languages for which canonicalization may alter the
// script or region.
for _, lm := range language.AliasMap {
tag := language.Tag{LangID: language.Language(lm.From)}
if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 {
notEquivalent = append(notEquivalent, language.Language(lm.From))
}
}
// Maximize undefined regions of paradigm locales.
for i, v := range paradigmLocales {
t := language.Tag{LangID: language.Language(v[0])}
max, _ := t.Maximize()
if v[1] == 0 {
paradigmLocales[i][1] = uint16(max.RegionID)
}
if v[2] == 0 {
paradigmLocales[i][2] = uint16(max.RegionID)
}
}
}
model.go | package blockbook
import (
"github.com/trustwallet/golibs/tokentype"
"math/big"
)
type Page struct {
Transactions []Transaction `json:"transactions,omitempty"`
Tokens []Token `json:"tokens,omitempty"`
}
type NodeInfo struct {
Blockbook *Blockbook `json:"blockbook"`
Backend *Backend `json:"backend"`
}
type Blockbook struct {
BestHeight int64 `json:"bestHeight"`
}
type Backend struct {
Blocks int64 `json:"blocks"`
}
type Block struct {
Transactions []Transaction `json:"txs"`
}
type Transaction struct {
TxID string `json:"txid"`
Vin []Output `json:"vin"`
Vout []Output `json:"vout"`
BlockHeight int64 `json:"blockHeight"`
BlockTime int64 `json:"blockTime"`
Value string `json:"value"`
Fees string `json:"fees"`
TokenTransfers []TokenTransfer `json:"tokenTransfers,omitempty"`
EthereumSpecific *EthereumSpecific `json:"ethereumSpecific,omitempty"`
}
type Output struct {
Value string `json:"value,omitempty"`
Addresses []string `json:"addresses"`
}
type TokenTransfer struct {
Decimals uint `json:"decimals"`
From string `json:"from"`
Name string `json:"name"`
Symbol string `json:"symbol"`
To string `json:"to"`
Token string `json:"token"`
Type string `json:"type"`
Value string `json:"value"`
}
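// An illustrative tokenTransfers entry as it would appear in Blockbook's JSON output
// (addresses and amounts are made up for the example):
//
//	{
//		"decimals": 18,
//		"from": "0x1111111111111111111111111111111111111111",
//		"name": "Example Token",
//		"symbol": "EXT",
//		"to": "0x2222222222222222222222222222222222222222",
//		"token": "0x3333333333333333333333333333333333333333",
//		"type": "ERC20",
//		"value": "1000000000000000000"
//	}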
// Token contains info about tokens held by an address
type Token struct {
Balance string `json:"balance,omitempty"`
	Contract string `json:"contract"`
	Decimals uint   `json:"decimals"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Type tokentype.Type `json:"type"`
}
// EthereumSpecific contains ethereum specific transaction data
type EthereumSpecific struct {
Status int `json:"status"` // -1 pending, 0 Fail, 1 OK
Nonce uint64 `json:"nonce"`
GasLimit *big.Int `json:"gasLimit"`
GasUsed *big.Int `json:"gasUsed"`
GasPrice string `json:"gasPrice"`
Data string `json:"data,omitempty"`
}
parser.rs | //! Benchmarks of the parsing process in Boa.
use boa::syntax::{lexer::Lexer, parser::Parser};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
#[cfg(all(target_arch = "x86_64", target_os = "linux", target_env = "gnu"))]
#[cfg_attr(
all(target_arch = "x86_64", target_os = "linux", target_env = "gnu"),
global_allocator
)]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
static EXPRESSION: &str = r#"
1 + 1 + 1 + 1 + 1 + 1 / 1 + 1 + 1 * 1 + 1 + 1 + 1;
"#;
fn expression_parser(c: &mut Criterion) {
// We include the lexing in the benchmarks, since they will get together soon, anyways.
c.bench_function("Expression (Parser)", move |b| {
b.iter(|| {
let mut lexer = Lexer::new(black_box(EXPRESSION));
lexer.lex().expect("failed to lex");
Parser::new(&black_box(lexer.tokens)).parse_all()
})
});
}
static HELLO_WORLD: &str = "let foo = 'hello world!'; foo;";
fn hello_world_parser(c: &mut Criterion) {
// We include the lexing in the benchmarks, since they will get together soon, anyways.
c.bench_function("Hello World (Parser)", move |b| {
b.iter(|| {
let mut lexer = Lexer::new(black_box(HELLO_WORLD));
lexer.lex().expect("failed to lex");
Parser::new(&black_box(lexer.tokens)).parse_all()
})
});
}
static FOR_LOOP: &str = r#"
for (let a = 10; a < 100; a++) {
if (a < 10) {
console.log("impossible D:");
} else if (a < 50) {
console.log("starting");
} else {
console.log("finishing");
}
}
"#;
fn for_loop_parser(c: &mut Criterion) {
// We include the lexing in the benchmarks, since they will get together soon, anyways.
c.bench_function("For loop (Parser)", move |b| {
b.iter(|| {
let mut lexer = Lexer::new(black_box(FOR_LOOP));
lexer.lex().expect("failed to lex");
Parser::new(&black_box(lexer.tokens)).parse_all()
})
});
}
static LONG_REPETITION: &str = r#"
for (let a = 10; a < 100; a++) {
if (a < 10) {
console.log("impossible D:");
} else if (a < 50) {
console.log("starting");
} else {
console.log("finishing");
}
}
"#;
fn long_file_parser(c: &mut Criterion) {
use std::{
fs::{self, File},
io::{BufWriter, Write},
};
// We include the lexing in the benchmarks, since they will get together soon, anyways.
const FILE_NAME: &str = "long_file_test.js";
{
let mut file = BufWriter::new(
File::create(FILE_NAME).unwrap_or_else(|_| panic!("could not create {}", FILE_NAME)),
);
for _ in 0..400 {
file.write_all(LONG_REPETITION.as_bytes())
.unwrap_or_else(|_| panic!("could not write {}", FILE_NAME));
}
}
c.bench_function("Long file (Parser)", move |b| {
b.iter(|| {
let file_str = fs::read_to_string(FILE_NAME)
.unwrap_or_else(|_| panic!("could not read {}", FILE_NAME));
let mut lexer = Lexer::new(black_box(&file_str));
lexer.lex().expect("failed to lex");
Parser::new(&black_box(lexer.tokens)).parse_all()
})
});
fs::remove_file(FILE_NAME).unwrap_or_else(|_| panic!("could not remove {}", FILE_NAME));
}
static GOAL_SYMBOL_SWITCH: &str = r#"
function foo(regex, num) {}
let i = 0;
while (i < 1000000) {
foo(/ab+c/, 5.0/5);
i++;
}
"#;
fn goal_symbol_switch(c: &mut Criterion) {
    // We include the lexing in the benchmarks, since they will get together soon, anyways.
    c.bench_function("Goal Symbols (Parser)", move |b| {
        b.iter(|| {
            let mut lexer = Lexer::new(black_box(GOAL_SYMBOL_SWITCH));
            lexer.lex().expect("failed to lex");
            Parser::new(&black_box(lexer.tokens)).parse_all()
        })
    });
}
criterion_group!(
parser,
expression_parser,
hello_world_parser,
for_loop_parser,
long_file_parser,
goal_symbol_switch,
);
criterion_main!(parser);
diff-main.go | /*
* MinIO Client (C) 2015-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"strings"
"github.com/fatih/color"
"github.com/minio/cli"
json "github.com/minio/mc/pkg/colorjson"
"github.com/minio/mc/pkg/probe"
"github.com/minio/minio/pkg/console"
)
// diff specific flags.
var (
diffFlags = []cli.Flag{}
)
// Compute differences in object name, size, and date between two buckets.
var diffCmd = cli.Command{
Name: "diff",
Usage: "list differences in object name, size, and date between two buckets",
Action: mainDiff,
Before: setGlobalsFromContext,
Flags: append(diffFlags, globalFlags...),
CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} [FLAGS] FIRST SECOND
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}
DESCRIPTION:
Diff only calculates differences in object name, size and time. It *DOES NOT* compare objects' contents.
LEGEND:
< - object is only in source.
> - object is only in destination.
! - newer object is in source.
EXAMPLES:
1. Compare a local folder with a folder on Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} ~/Photos s3/mybucket/Photos
2. Compare two folders on a local filesystem.
{{.Prompt}} {{.HelpName}} ~/Photos /Media/Backup/Photos
`,
}
// diffMessage json container for diff messages
type diffMessage struct {
Status string `json:"status"`
FirstURL string `json:"first"`
SecondURL string `json:"second"`
Diff differType `json:"diff"`
Error *probe.Error `json:"error,omitempty"`
firstContent *ClientContent
secondContent *ClientContent
}
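// An illustrative, hand-written JSON form of a diffMessage as emitted by JSON() below
// (paths are made up; the concrete "diff" string depends on the differType value):
//
//	{"status":"success","first":"play/src/a.txt","second":"play/dst/a.txt","diff":"size"}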
// String colorized diff message
func (d diffMessage) String() string {
msg := ""
switch d.Diff {
case differInFirst:
msg = console.Colorize("DiffOnlyInFirst", "< "+d.FirstURL)
case differInSecond:
msg = console.Colorize("DiffOnlyInSecond", "> "+d.SecondURL)
case differInType:
msg = console.Colorize("DiffType", "! "+d.SecondURL)
case differInSize:
msg = console.Colorize("DiffSize", "! "+d.SecondURL)
case differInMetadata:
msg = console.Colorize("DiffMetadata", "! "+d.SecondURL)
default:
fatalIf(errDummy().Trace(d.FirstURL, d.SecondURL),
"Unhandled difference between `"+d.FirstURL+"` and `"+d.SecondURL+"`.")
}
return msg
}
// JSON jsonified diff message
func (d diffMessage) JSON() string {
d.Status = "success"
diffJSONBytes, e := json.MarshalIndent(d, "", " ")
fatalIf(probe.NewError(e),
"Unable to marshal diff message `"+d.FirstURL+"`, `"+d.SecondURL+"` and `"+string(d.Diff)+"`.")
return string(diffJSONBytes)
}
func checkDiffSyntax(ctx *cli.Context, encKeyDB map[string][]prefixSSEPair) {
if len(ctx.Args()) != 2 {
cli.ShowCommandHelpAndExit(ctx, "diff", 1) // last argument is exit code
}
for _, arg := range ctx.Args() {
if strings.TrimSpace(arg) == "" {
fatalIf(errInvalidArgument().Trace(ctx.Args()...), "Unable to validate empty argument.")
}
}
URLs := ctx.Args()
firstURL := URLs[0]
secondURL := URLs[1]
// Diff only works between two directories, verify them below.
// Verify if firstURL is accessible.
_, firstContent, err := url2Stat(firstURL, false, encKeyDB)
if err != nil {
fatalIf(err.Trace(firstURL), fmt.Sprintf("Unable to stat '%s'.", firstURL))
}
// Verify if its a directory.
if !firstContent.Type.IsDir() {
fatalIf(errInvalidArgument().Trace(firstURL), fmt.Sprintf("`%s` is not a folder.", firstURL))
}
// Verify if secondURL is accessible.
_, secondContent, err := url2Stat(secondURL, false, encKeyDB)
if err != nil {
fatalIf(err.Trace(secondURL), fmt.Sprintf("Unable to stat '%s'.", secondURL))
}
// Verify if its a directory.
if !secondContent.Type.IsDir() {
fatalIf(errInvalidArgument().Trace(secondURL), fmt.Sprintf("`%s` is not a folder.", secondURL))
}
}
// doDiffMain runs the diff.
func doDiffMain(firstURL, secondURL string) error {
// Source and targets are always directories
sourceSeparator := string(newClientURL(firstURL).Separator)
if !strings.HasSuffix(firstURL, sourceSeparator) {
firstURL = firstURL + sourceSeparator
}
targetSeparator := string(newClientURL(secondURL).Separator)
if !strings.HasSuffix(secondURL, targetSeparator) {
secondURL = secondURL + targetSeparator
}
// Expand aliased urls.
firstAlias, firstURL, _ := mustExpandAlias(firstURL)
secondAlias, secondURL, _ := mustExpandAlias(secondURL)
firstClient, err := newClientFromAlias(firstAlias, firstURL)
if err != nil {
fatalIf(err.Trace(firstAlias, firstURL, secondAlias, secondURL),
fmt.Sprintf("Failed to diff '%s' and '%s'", firstURL, secondURL))
}
secondClient, err := newClientFromAlias(secondAlias, secondURL)
if err != nil {
fatalIf(err.Trace(firstAlias, firstURL, secondAlias, secondURL),
fmt.Sprintf("Failed to diff '%s' and '%s'", firstURL, secondURL))
}
// Diff first and second urls.
for diffMsg := range objectDifference(firstClient, secondClient, firstURL, secondURL, false) {
if diffMsg.Error != nil {
errorIf(diffMsg.Error, "Unable to calculate objects difference.")
// Ignore error and proceed to next object.
continue
}
printMsg(diffMsg)
}
return nil
}
// mainDiff main for 'diff'.
func mainDiff(ctx *cli.Context) error {
// Parse encryption keys per command.
encKeyDB, err := getEncKeys(ctx)
fatalIf(err, "Unable to parse encryption keys.")
// check 'diff' cli arguments.
checkDiffSyntax(ctx, encKeyDB)
// Additional command specific theme customization.
console.SetColor("DiffMessage", color.New(color.FgGreen, color.Bold))
console.SetColor("DiffOnlyInFirst", color.New(color.FgRed))
console.SetColor("DiffOnlyInSecond", color.New(color.FgGreen))
console.SetColor("DiffType", color.New(color.FgMagenta))
console.SetColor("DiffSize", color.New(color.FgYellow, color.Bold))
console.SetColor("DiffTime", color.New(color.FgYellow, color.Bold))
URLs := ctx.Args()
firstURL := URLs.Get(0)
secondURL := URLs.Get(1)
return doDiffMain(firstURL, secondURL)
}
settings.py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-n1vl4be=11s&5oo0^453rw&9(g3v0pjb6=t4ze@d_3j4i3)y+y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
querier.rs | use cosmwasm_std::{
Binary, CanonicalAddr, QuerierWrapper, QueryRequest, StdResult, Uint128, WasmQuery,
};
use cosmwasm_storage::to_length_prefixed;
pub fn load_token_balance(
querier: &QuerierWrapper,
contract_addr: String,
account_addr: &CanonicalAddr,
) -> StdResult<Uint128> {
// load balance form the token contract
let res: Uint128 = querier
.query(&QueryRequest::Wasm(WasmQuery::Raw {
contract_addr,
key: Binary::from(concat(
&to_length_prefixed(b"balance").to_vec(),
account_addr.as_slice(),
)),
}))
.unwrap_or_else(|_| Uint128::zero());
Ok(res)
}
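// A minimal usage sketch (hypothetical contract code; `deps`, `token_addr` and `staker`
// are assumptions for illustration, not part of this file):
//
//     let balance = load_token_balance(
//         &deps.querier,
//         token_addr.to_string(),
//         &deps.api.addr_canonicalize(staker.as_str())?,
//     )?;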
#[inline]
fn concat(namespace: &[u8], key: &[u8]) -> Vec<u8> {
    let mut k = namespace.to_vec();
    k.extend_from_slice(key);
    k
}
app.js | //GLOBAL VARIABLES
//window.player = music player
const defaultAlbumImg = "https://d2qqvwdwi4u972.cloudfront.net/static/img/default_album.png"
$(document).ready(function(){
window.player = $("#music-player")
$("footer:not(#orig-footer)").remove()
/* CSRF Handling*/
$.ajaxSetup({
beforeSend: function(xhr, settings) {
function getCookie(name) {
var cookieValue = null;
if (document.cookie && document.cookie != '') {
var cookies = document.cookie.split(';');
for (var i = 0; i < cookies.length; i++) {
var cookie = jQuery.trim(cookies[i]);
// Does this cookie string begin with the name we want?
if (cookie.substring(0, name.length + 1) == (name + '=')) {
cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
break;
}
}
}
return cookieValue;
}
if (!(/^http:.*/.test(settings.url) || /^https:.*/.test(settings.url))) {
// Only send the token to relative URLs i.e. locally.
xhr.setRequestHeader("X-CSRFToken", getCookie('csrftoken'));
}
}
});
$("button#search-song").click(searchSong)
$("input#search-songs").keypress(e => {
let key = e.which || e.keyCode
if(key == 13) searchSong()
})
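	// Expected shape of the /postsong response consumed below (field names come from
	// this handler; the example values are illustrative only):
	//   { "valid": true, "name": "Song", "artist": "Artist", "preview": "https://.../preview.mp3", "image": "https://.../cover.jpg" }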
$(document).on("click", ".suggested-track", function() { //click on suggestion
$.post("/postsong", {
'name': searchQuery, //the query was saved the moment the "search" button was pressed
'index': parseInt(this.id), //id is the index of the song in the results list
loc: currentLocation
}, data => {
//if data is valid and preview url is available, set player to player to play preview url
//if invalid or preview url unavailable, dead link is used (not playable)
if(!data.valid || !data.name) {
alert("Unable to add song.")
return
}
let src = (data.preview) ? data.preview : "#"
let image = (data.image) ? data.image : defaultAlbumImg
if($(".no-songs").length || $(".song-error").length) $(".song-list").html("")
$(".song-list").append(`<div class="track">
<p src="${src}">${data.name}<span class="artist"> - ${data.artist}</span></p>
<img src="${image}" width=50 height=50>
</div>`)
pause()
$(".suggestions").html("")
}).fail(() => alert('Error: Could not post song'))
$("button#post-song").val("")
})
$(document).on("click", ":not(.suggestions)", () => $(".suggestions").html(""))
/*Audio controls*/
$(".fa-play").click(() => {
if(!checkPlayable()) return
if(player[0].canPlayType && player[0].paused) play()
else pause()
})
player.on("ended", nextSong)
/*Playlist controls*/
$(document).on("click", ".track", function(){
player.attr("src", $(this).children("p").attr("src"))
$(".selected").removeClass("selected")
$(this).addClass("selected")
if(!checkPlayable()) {
alert("Error: Song not playable")
return
}
play()
})
$(".fa-step-forward").click(nextSong)
$(".fa-step-backward").click(() => {
if($(".selected").is(":first-child")) return
$(".selected").prev().addClass("temp")
$(".selected").removeClass("selected")
$(".temp").addClass("selected")
$(".temp").removeClass("temp")
player.attr("src", $(".selected").children("p").attr("src"))
if(!checkPlayable()) return
play()
})
})
function pause(){
player[0].pause()
$(".fa-play").removeClass("fa-pause")
}
function play(){
player[0].play()
$(".fa-play").addClass("fa-pause")
}
function checkPlayable(){
	if(player.attr("src") == "#"){
		$(".selected").css("background-color", "#ffcccc")
		nextSong()
return false
}
return true
}
function nextSong(){
if($(".selected").is(":last-child")) {
$(".selected").removeClass("selected")
pause()
return
}
$(".selected").next().addClass("temp")
$(".selected").removeClass("selected")
$(".temp").addClass("selected")
$(".temp").removeClass("temp")
player.attr("src", $(".selected").children("p").attr("src"))
if(!$(".selected").length || !checkPlayable()) return
play()
}
function searchSong(){
if(!$("#search-songs").val().length) return
$(".suggestions").html("")
window.searchQuery = $("#search-songs").val()
$.get("/searchsong", {'name': $("#search-songs").val()}, data => {
data = JSON.parse(data)
//console.log(data)
data.results.tracks.items.forEach((elem, index) => {
$(".suggestions").append(`<div class="suggested-track" id="${index}">
<p src="${elem.preview_url}">${elem.name}<span class="artist"> - ${elem.artists[0].name}</span></p>
<img src="${elem.album.images[2].url}" width=40 height=40>
</div>`)
})
})
}
simple_test.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds_test
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"sync"
"testing"
"time"
"istio.io/istio/pilot/pkg/bootstrap"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/test/env"
"istio.io/istio/tests/util"
)
// This file contains common helpers and initialization for the local/unit tests
// for XDS. Tests start a Pilot, configured with an in-memory endpoint registry, and
// using a file-based config, sourced from tests/testdata.
// A single instance of pilot is used by all tests - similar to the e2e environment.
// initLocalPilotTestEnv() must be called at the start of each test to ensure the
// environment is configured. Tests making modifications to services should use
// unique names for the service.
// The tests can also use a local envoy process - see TestEnvoy as example, to verify
// envoy accepts the config. Most tests are changing and checking the state of pilot.
//
// The pilot is accessible as pilotServer, which is an instance of bootstrap.Server.
// The server has a field EnvoyXdsServer which is the configured instance of the XDS service.
//
// DiscoveryServer.MemRegistry has a memory registry that can be used by tests,
// implemented in debug.go file.
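// Typical usage from a test in this package (mirrors TestEnvoy below): call
// initLocalPilotTestEnv(t) at the start of the test and defer the returned
// tear-down function:
//
//	server, tearDown := initLocalPilotTestEnv(t)
//	defer tearDown()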
var (
testEnv *env.TestSetup
initMutex sync.Mutex
initEnvoyMutex sync.Mutex
envoyStarted = false
// Use 'service3' and 'app3' for pilot local tests.
localIP = "10.3.0.3"
)
const (
// 10.10.0.0/24 is service CIDR range
// 10.0.0.0/9 is instance CIDR range
app3Ip = "10.2.0.1"
gatewayIP = "10.3.0.1"
ingressIP = "10.3.0.2"
)
// Common code for the xds testing.
// The tests in this package use an in-process pilot using mock service registry and
// envoy.
// Additional servers may be added here.
// One set of pilot/envoy is used for all tests, similar to the larger integration
// tests in real docker/k8s environments
// Common test environment. This is a singleton, the env will be
// used for multiple tests, for local integration testing.
func startEnvoy(t *testing.T) {
initEnvoyMutex.Lock()
defer initEnvoyMutex.Unlock()
if envoyStarted {
return
}
tmplB, err := ioutil.ReadFile(env.IstioSrc + "/tests/testdata/bootstrap_tmpl.json")
if err != nil {
t.Fatal("Can't read bootstrap template", err)
}
testEnv.EnvoyTemplate = string(tmplB)
testEnv.Dir = env.IstioSrc
nodeID := sidecarID(app3Ip, "app3")
testEnv.EnvoyParams = []string{"--service-cluster", "serviceCluster", "--service-node", nodeID}
testEnv.EnvoyConfigOpt = map[string]interface{}{
"NodeID": nodeID,
"BaseDir": env.IstioSrc + "/tests/testdata/local",
// Same value used in the real template
"meta_json_str": fmt.Sprintf(`"BASE": "%s", ISTIO_VERSION: 1.5.0`, env.IstioSrc+"/tests/testdata/local"),
}
if err := testEnv.SetUp(); err != nil {
t.Fatalf("Failed to setup test: %v", err)
}
envoyStarted = true
}
func sidecarID(ip, deployment string) string {
return fmt.Sprintf("sidecar~%s~%s-644fc65469-96dza.testns~testns.svc.cluster.local", ip, deployment)
}
func gatewayID(ip string) string { //nolint: unparam
return fmt.Sprintf("router~%s~istio-gateway-644fc65469-96dzt.istio-system~istio-system.svc.cluster.local", ip)
}
// localPilotTestEnv builds a pilot testing environment and it initializes with registry with the passed in init function.
func localPilotTestEnv(
t *testing.T,
initFunc func(*bootstrap.Server),
additionalArgs ...func(*bootstrap.PilotArgs)) (*bootstrap.Server, util.TearDownFunc) { //nolint: unparam
initMutex.Lock()
defer initMutex.Unlock()
additionalArgs = append(additionalArgs, func(args *bootstrap.PilotArgs) {
args.Plugins = bootstrap.DefaultPlugins
})
server, tearDown := util.EnsureTestServer(additionalArgs...)
testEnv = env.NewTestSetup(env.XDSTest, t)
testEnv.Ports().PilotGrpcPort = uint16(util.MockPilotGrpcPort)
testEnv.Ports().PilotHTTPPort = uint16(util.MockPilotHTTPPort)
testEnv.IstioSrc = env.IstioSrc
testEnv.IstioOut = env.IstioOut
localIP = getLocalIP()
// Run the initialization function.
initFunc(server)
// Trigger a push, to initiate push context with contents of registry.
server.EnvoyXdsServer.Push(&model.PushRequest{Full: true})
// Wait till a push is propagated.
time.Sleep(200 * time.Millisecond)
// Add a dummy client connection to validate that push is triggered.
dummyClient := adsConnectAndWait(t, 0x0a0a0a0a)
defer dummyClient.Close()
return server, tearDown
}
// initLocalPilotTestEnv creates a local, in process Pilot with XDSv2 support and a set
// of common test configs. This is a singleton server, reused for all tests in this package.
//
// The server will have a set of pre-defined instances and services, and read CRDs from the
// common tests/testdata directory.
func initLocalPilotTestEnv(t *testing.T) (*bootstrap.Server, util.TearDownFunc) {
return localPilotTestEnv(t, func(server *bootstrap.Server) {
// Service and endpoints for hello.default - used in v1 pilot tests
hostname := host.Name("hello.default.svc.cluster.local")
server.EnvoyXdsServer.MemRegistry.AddService(hostname, &model.Service{
Hostname: hostname,
Address: "10.10.0.3",
Ports: testPorts(0),
Attributes: model.ServiceAttributes{
Name: "local",
Namespace: "default",
},
})
server.EnvoyXdsServer.MemRegistry.SetEndpoints(string(hostname), "default", []*model.IstioEndpoint{
{
Address: "127.0.0.1",
EndpointPort: uint32(testEnv.Ports().BackendPort),
ServicePortName: "http",
Locality: model.Locality{Label: "az"},
ServiceAccount: "hello-sa",
},
})
// "local" service points to the current host
hostname = "local.default.svc.cluster.local"
server.EnvoyXdsServer.MemRegistry.AddService(hostname, &model.Service{
Hostname: hostname,
Address: "10.10.0.4",
Ports: []*model.Port{
{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
}},
Attributes: model.ServiceAttributes{
Name: "local",
Namespace: "default",
},
})
server.EnvoyXdsServer.MemRegistry.SetEndpoints(string(hostname), "default", []*model.IstioEndpoint{
{
Address: localIP,
EndpointPort: uint32(testEnv.Ports().BackendPort),
ServicePortName: "http",
Locality: model.Locality{Label: "az"},
},
})
// Explicit test service, in the v2 memory registry. Similar to mock.MakeService,
// but easier to read.
hostname = "service3.default.svc.cluster.local"
server.EnvoyXdsServer.MemRegistry.AddService(hostname, &model.Service{
Hostname: hostname,
Address: "10.10.0.1",
Ports: testPorts(0),
Attributes: model.ServiceAttributes{
Name: "service3",
Namespace: "default",
},
})
svc3Endpoints := make([]*model.IstioEndpoint, len(testPorts(0)))
for i, p := range testPorts(0) {
svc3Endpoints[i] = &model.IstioEndpoint{
Address: app3Ip,
EndpointPort: uint32(p.Port),
ServicePortName: p.Name,
Locality: model.Locality{Label: "az"},
}
}
server.EnvoyXdsServer.MemRegistry.SetEndpoints(string(hostname), "default", svc3Endpoints)
// Mock ingress service
server.EnvoyXdsServer.MemRegistry.AddService("istio-ingress.istio-system.svc.cluster.local", &model.Service{
Hostname: "istio-ingress.istio-system.svc.cluster.local",
Address: "10.10.0.2",
Ports: []*model.Port{
{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
{
Name: "https",
Port: 443,
Protocol: protocol.HTTPS,
},
},
// TODO: set attributes for this service. It may affect TestLDSIsolated, as we now have a service defined in the istio-system namespace
})
server.EnvoyXdsServer.MemRegistry.AddInstance("istio-ingress.istio-system.svc.cluster.local", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: ingressIP,
EndpointPort: 80,
ServicePortName: "http",
Locality: model.Locality{Label: "az"},
Labels: labels.Instance{constants.IstioLabel: constants.IstioIngressLabelValue},
},
ServicePort: &model.Port{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
})
server.EnvoyXdsServer.MemRegistry.AddInstance("istio-ingress.istio-system.svc.cluster.local", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: ingressIP,
EndpointPort: 443,
ServicePortName: "https",
Locality: model.Locality{Label: "az"},
Labels: labels.Instance{constants.IstioLabel: constants.IstioIngressLabelValue},
},
ServicePort: &model.Port{
Name: "https",
Port: 443,
Protocol: protocol.HTTPS,
},
})
// RouteConf Service4 is using port 80, to test that we generate multiple clusters (regression)
// service4 has no endpoints
server.EnvoyXdsServer.MemRegistry.AddHTTPService("service4.default.svc.cluster.local", "10.1.0.4", 80)
})
}
// nolint: unparam
func testPorts(base int) []*model.Port {
return []*model.Port{
{
Name: "http",
Port: base + 80,
Protocol: protocol.HTTP,
}, {
Name: "http-status",
Port: base + 81,
Protocol: protocol.HTTP,
}, {
Name: "custom",
Port: base + 90,
Protocol: protocol.TCP,
}, {
Name: "mongo",
Port: base + 100,
Protocol: protocol.Mongo,
}, {
Name: "redis",
Port: base + 110,
Protocol: protocol.Redis,
}, {
Name: "mysql",
Port: base + 120,
Protocol: protocol.MySQL,
}, {
Name: "h2port",
Port: base + 66,
Protocol: protocol.GRPC,
}}
}
// Test XDS with real envoy.
func TestEnvoy(t *testing.T) {
_, tearDown := initLocalPilotTestEnv(t)
defer func() {
if testEnv != nil {
testEnv.TearDown()
}
tearDown()
}()
startEnvoy(t)
// Make sure tcp port is ready before starting the test.
env.WaitForPort(testEnv.Ports().TCPProxyPort)
t.Run("envoyInit", envoyInit)
t.Run("service", testService)
}
// envoyInit verifies envoy has accepted the config from pilot by checking the stats.
func envoyInit(t *testing.T) {
statsURL := fmt.Sprintf("http://localhost:%d/stats?format=json", testEnv.Ports().AdminPort)
res, err := http.Get(statsURL)
if err != nil {
t.Fatal("Failed to get stats, envoy not started")
}
statsBytes, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("Failed to read stats body", err)
}
statsMap := stats2map(statsBytes)
if statsMap["cluster_manager.cds.update_success"] < 1 {
t.Error("Failed cds update")
}
// Other interesting values for CDS: cluster_added: 19, active_clusters
// cds.update_attempt: 2, cds.update_rejected, cds.version
for _, port := range testPorts(0) {
stat := fmt.Sprintf("cluster.outbound|%d||service3.default.svc.cluster.local.update_success", port.Port)
if statsMap[stat] < 1 {
t.Error("Failed cds updates")
}
}
if statsMap["cluster.xds-grpc.update_failure"] > 0 {
t.Error("GRPC update failure")
}
if statsMap["listener_manager.lds.update_rejected"] > 0 {
t.Error("LDS update failure")
}
if statsMap["listener_manager.lds.update_success"] < 1 {
t.Error("LDS update failure")
}
}
// Example of a local test connecting to the in-process test service through Envoy's
// http proxy mode. This also serves as a test for the http proxy (finally).
func testService(t *testing.T) {
proxyURL, _ := url.Parse("http://localhost:17002")
client := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)}}
res, err := client.Get("http://local.default.svc.cluster.local")
if err != nil {
t.Error("Failed to access proxy", err)
return
}
resdmp, _ := httputil.DumpResponse(res, true)
t.Log(string(resdmp))
if res.Status != "200 OK" |
}
// EnvoyStat is used to parse envoy stats
type EnvoyStat struct {
Name string `json:"name"`
Value int `json:"value"`
}
// stats2map parses envoy stats.
func stats2map(stats []byte) map[string]int {
s := struct {
Stats []EnvoyStat `json:"stats"`
}{}
_ = json.Unmarshal(stats, &s)
m := map[string]int{}
for _, stat := range s.Stats {
m[stat.Name] = stat.Value
}
return m
}
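// Illustrative sketch (added example, not part of the original test file): stats2map
// flattens Envoy's /stats?format=json output into a name -> value map. The stat name
// below is just an example value.
//
//	raw := []byte(`{"stats":[{"name":"cluster_manager.cds.update_success","value":1}]}`)
//	m := stats2map(raw)
//	// m["cluster_manager.cds.update_success"] == 1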
func getLocalIP() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
// check the address type; if it is not a loopback address, return its IPv4 form
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
return ""
}
// newEndpointWithAccount is a helper for IstioEndpoint creation. Creates endpoints with
// port name "http-main", the given IP and service account, and a 'version' label.
// nolint: unparam
func newEndpointWithAccount(ip, account, version string) []*model.IstioEndpoint {
return []*model.IstioEndpoint{
{
Address: ip,
ServicePortName: "http-main",
EndpointPort: 80,
Labels: map[string]string{"version": version},
UID: "uid1",
ServiceAccount: account,
},
}
}
| {
t.Error("Proxy failed ", res.Status)
} |
vsock_windows.go | package transport
func NewVsockTransport() Transport | {
return &hvs{}
} |
|
common.go | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"container/list"
"crypto"
"crypto/rand"
"crypto/sha512"
"github.com/chinaso/fabricGM/cryptopkg/golangGM/x509"
"errors"
"fmt"
//"internal/cpu"
"io"
"math/big"
"net"
"strings"
"sync"
"time"
)
const (
VersionSSL30 = 0x0300
VersionTLS10 = 0x0301
VersionTLS11 = 0x0302
VersionTLS12 = 0x0303
)
const (
maxPlaintext = 16384 // maximum plaintext payload length
maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
recordHeaderLen = 5 // record header length
maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
maxWarnAlertCount = 5 // maximum number of consecutive warning alerts
minVersion = VersionTLS10
maxVersion = VersionTLS12
)
// TLS record types.
type recordType uint8
const (
recordTypeChangeCipherSpec recordType = 20
recordTypeAlert recordType = 21
recordTypeHandshake recordType = 22
recordTypeApplicationData recordType = 23
)
// TLS handshake message types.
const (
typeHelloRequest uint8 = 0
typeClientHello uint8 = 1
typeServerHello uint8 = 2
typeNewSessionTicket uint8 = 4
typeCertificate uint8 = 11
typeServerKeyExchange uint8 = 12
typeCertificateRequest uint8 = 13
typeServerHelloDone uint8 = 14
typeCertificateVerify uint8 = 15
typeClientKeyExchange uint8 = 16
typeFinished uint8 = 20
typeCertificateStatus uint8 = 22
typeNextProtocol uint8 = 67 // Not IANA assigned
)
// TLS compression types.
const (
compressionNone uint8 = 0
)
// TLS extension numbers
const (
extensionServerName uint16 = 0
extensionStatusRequest uint16 = 5
extensionSupportedCurves uint16 = 10
extensionSupportedPoints uint16 = 11
extensionSignatureAlgorithms uint16 = 13
extensionALPN uint16 = 16
extensionSCT uint16 = 18 // https://tools.ietf.org/html/rfc6962#section-6
extensionSessionTicket uint16 = 35
extensionNextProtoNeg uint16 = 13172 // not IANA assigned
extensionRenegotiationInfo uint16 = 0xff01
)
// TLS signaling cipher suite values
const (
scsvRenegotiation uint16 = 0x00ff
)
// CurveID is the type of a TLS identifier for an elliptic curve. See
// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8
type CurveID uint16
const (
CurveP256 CurveID = 23
CurveP384 CurveID = 24
CurveP521 CurveID = 25
X25519 CurveID = 29
// ----------------------------------------------- //
CureP256SM2 CurveID = 31
)
// TLS Elliptic Curve Point Formats
// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9
const (
pointFormatUncompressed uint8 = 0
)
// TLS CertificateStatusType (RFC 3546)
const (
statusTypeOCSP uint8 = 1
)
// Certificate types (for certificateRequestMsg)
const (
certTypeRSASign = 1 // A certificate containing an RSA key
certTypeDSSSign = 2 // A certificate containing a DSA key
certTypeRSAFixedDH = 3 // A certificate containing a static DH key
certTypeDSSFixedDH = 4 // A certificate containing a static DH key
// See RFC 4492 sections 3 and 5.5.
certTypeECDSASign = 64 // A certificate containing an ECDSA-capable public key, signed with ECDSA.
certTypeRSAFixedECDH = 65 // A certificate containing an ECDH-capable public key, signed with RSA.
certTypeECDSAFixedECDH = 66 // A certificate containing an ECDH-capable public key, signed with ECDSA.
// Rest of these are reserved by the TLS spec
certTypeSM2Sign = 74 // A certificate containing an SM2-capable public key, signed with SM2.
)
// Signature algorithms (for internal signaling use). Starting at 16 to avoid overlap with
// TLS 1.2 codepoints (RFC 5246, section A.4.1), with which these have nothing to do.
const (
signaturePKCS1v15 uint8 = iota + 16
signatureECDSA
signatureRSAPSS
signatureSM2
)
// supportedSignatureAlgorithms contains the signature and hash algorithms that
// the code advertises as supported in a TLS 1.2 ClientHello and in a TLS 1.2
// CertificateRequest. The two fields are merged to match with TLS 1.3.
// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc.
var supportedSignatureAlgorithms = []SignatureScheme{
PKCS1WithSHA256,
ECDSAWithP256AndSHA256,
PKCS1WithSHA384,
ECDSAWithP384AndSHA384,
PKCS1WithSHA512,
ECDSAWithP521AndSHA512,
PKCS1WithSHA1,
ECDSAWithSHA1,
}
// ConnectionState records basic TLS details about the connection.
type ConnectionState struct {
Version uint16 // TLS version used by the connection (e.g. VersionTLS12)
HandshakeComplete bool // TLS handshake is complete
DidResume bool // connection resumes a previous TLS connection
CipherSuite uint16 // cipher suite in use (TLS_RSA_WITH_RC4_128_SHA, ...)
NegotiatedProtocol string // negotiated next protocol (not guaranteed to be from Config.NextProtos)
NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server (client side only)
ServerName string // server name requested by client, if any (server side only)
PeerCertificates []*x509.Certificate // certificate chain presented by remote peer
VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates
SignedCertificateTimestamps [][]byte // SCTs from the server, if any
OCSPResponse []byte // stapled OCSP response from server, if any
// ekm is a closure exposed via ExportKeyingMaterial.
ekm func(label string, context []byte, length int) ([]byte, error)
// TLSUnique contains the "tls-unique" channel binding value (see RFC
// 5929, section 3). For resumed sessions this value will be nil
// because resumption does not include enough context (see
// https://mitls.org/pages/attacks/3SHAKE#channelbindings). This will
// change in future versions of Go once the TLS master-secret fix has
// been standardized and implemented.
TLSUnique []byte
}
// ExportKeyingMaterial returns length bytes of exported key material in a new
// slice as defined in https://tools.ietf.org/html/rfc5705. If context is nil,
// it is not used as part of the seed. If the connection was set to allow
// renegotiation via Config.Renegotiation, this function will return an error.
func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) {
return cs.ekm(label, context, length)
}
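// Illustrative sketch (added example, not part of the original file; assumes conn is a
// *Conn from this package with a completed handshake, and the label/length are arbitrary
// example values):
//
//	state := conn.ConnectionState()
//	km, err := state.ExportKeyingMaterial("EXPERIMENTAL example", nil, 32)
//	if err != nil {
//		// renegotiation was allowed, so keying material is unavailable
//	}
//	_ = km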
// ClientAuthType declares the policy the server will follow for
// TLS Client Authentication.
type ClientAuthType int
const (
NoClientCert ClientAuthType = iota
RequestClientCert
RequireAnyClientCert
VerifyClientCertIfGiven
RequireAndVerifyClientCert
)
// ClientSessionState contains the state needed by clients to resume TLS
// sessions.
type ClientSessionState struct {
sessionTicket []uint8 // Encrypted ticket used for session resumption with server
vers uint16 // SSL/TLS version negotiated for the session
cipherSuite uint16 // Ciphersuite negotiated for the session
masterSecret []byte // MasterSecret generated by client on a full handshake
serverCertificates []*x509.Certificate // Certificate chain presented by the server
verifiedChains [][]*x509.Certificate // Certificate chains we built for verification
}
// ClientSessionCache is a cache of ClientSessionState objects that can be used
// by a client to resume a TLS session with a given server. ClientSessionCache
// implementations should expect to be called concurrently from different
// goroutines. Only ticket-based resumption is supported, not SessionID-based
// resumption.
type ClientSessionCache interface {
// Get searches for a ClientSessionState associated with the given key.
// On return, ok is true if one was found.
Get(sessionKey string) (session *ClientSessionState, ok bool)
// Put adds the ClientSessionState to the cache with the given key.
Put(sessionKey string, cs *ClientSessionState)
}
// SignatureScheme identifies a signature algorithm supported by TLS. See
// https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.3.
type SignatureScheme uint16
const (
PKCS1WithSHA1 SignatureScheme = 0x0201
PKCS1WithSHA256 SignatureScheme = 0x0401
PKCS1WithSHA384 SignatureScheme = 0x0501
PKCS1WithSHA512 SignatureScheme = 0x0601
PSSWithSHA256 SignatureScheme = 0x0804
PSSWithSHA384 SignatureScheme = 0x0805
PSSWithSHA512 SignatureScheme = 0x0806
ECDSAWithP256AndSHA256 SignatureScheme = 0x0403
ECDSAWithP384AndSHA384 SignatureScheme = 0x0503
ECDSAWithP521AndSHA512 SignatureScheme = 0x0603
// Legacy signature and hash algorithms for TLS 1.2.
ECDSAWithSHA1 SignatureScheme = 0x0203
// ------------------------------------------- //
SM2WithSM3 SignatureScheme = 0x0103
)
// ClientHelloInfo contains information from a ClientHello message in order to
// guide certificate selection in the GetCertificate callback.
type ClientHelloInfo struct {
// CipherSuites lists the CipherSuites supported by the client (e.g.
// TLS_RSA_WITH_RC4_128_SHA).
CipherSuites []uint16
// ServerName indicates the name of the server requested by the client
// in order to support virtual hosting. ServerName is only set if the
// client is using SNI (see
// https://tools.ietf.org/html/rfc4366#section-3.1).
ServerName string
// SupportedCurves lists the elliptic curves supported by the client.
// SupportedCurves is set only if the Supported Elliptic Curves
// Extension is being used (see
// https://tools.ietf.org/html/rfc4492#section-5.1.1).
SupportedCurves []CurveID
// SupportedPoints lists the point formats supported by the client.
// SupportedPoints is set only if the Supported Point Formats Extension
// is being used (see
// https://tools.ietf.org/html/rfc4492#section-5.1.2).
SupportedPoints []uint8
// SignatureSchemes lists the signature and hash schemes that the client
// is willing to verify. SignatureSchemes is set only if the Signature
// Algorithms Extension is being used (see
// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1).
SignatureSchemes []SignatureScheme
// SupportedProtos lists the application protocols supported by the client.
// SupportedProtos is set only if the Application-Layer Protocol
// Negotiation Extension is being used (see
// https://tools.ietf.org/html/rfc7301#section-3.1).
//
// Servers can select a protocol by setting Config.NextProtos in a
// GetConfigForClient return value.
SupportedProtos []string
// SupportedVersions lists the TLS versions supported by the client.
// For TLS versions less than 1.3, this is extrapolated from the max
// version advertised by the client, so values other than the greatest
// might be rejected if used.
SupportedVersions []uint16
// Conn is the underlying net.Conn for the connection. Do not read
// from, or write to, this connection; that will cause the TLS
// connection to fail.
Conn net.Conn
}
// CertificateRequestInfo contains information from a server's
// CertificateRequest message, which is used to demand a certificate and proof
// of control from a client.
type CertificateRequestInfo struct {
// AcceptableCAs contains zero or more, DER-encoded, X.501
// Distinguished Names. These are the names of root or intermediate CAs
// that the server wishes the returned certificate to be signed by. An
// empty slice indicates that the server has no preference.
AcceptableCAs [][]byte
// SignatureSchemes lists the signature schemes that the server is
// willing to verify.
SignatureSchemes []SignatureScheme
}
// RenegotiationSupport enumerates the different levels of support for TLS
// renegotiation. TLS renegotiation is the act of performing subsequent
// handshakes on a connection after the first. This significantly complicates
// the state machine and has been the source of numerous, subtle security
// issues. Initiating a renegotiation is not supported, but support for
// accepting renegotiation requests may be enabled.
//
// Even when enabled, the server may not change its identity between handshakes
// (i.e. the leaf certificate must be the same). Additionally, concurrent
// handshake and application data flow is not permitted so renegotiation can
// only be used with protocols that synchronise with the renegotiation, such as
// HTTPS.
type RenegotiationSupport int
const (
// RenegotiateNever disables renegotiation.
RenegotiateNever RenegotiationSupport = iota
// RenegotiateOnceAsClient allows a remote server to request
// renegotiation once per connection.
RenegotiateOnceAsClient
// RenegotiateFreelyAsClient allows a remote server to repeatedly
// request renegotiation.
RenegotiateFreelyAsClient
)
// A Config structure is used to configure a TLS client or server.
// After one has been passed to a TLS function it must not be
// modified. A Config may be reused; the tls package will also not
// modify it.
type Config struct {
// Rand provides the source of entropy for nonces and RSA blinding.
// If Rand is nil, TLS uses the cryptographic random reader in package
// crypto/rand.
// The Reader must be safe for use by multiple goroutines.
Rand io.Reader
// Time returns the current time as the number of seconds since the epoch.
// If Time is nil, TLS uses time.Now.
Time func() time.Time
// Certificates contains one or more certificate chains to present to
// the other side of the connection. Server configurations must include
// at least one certificate or else set GetCertificate. Clients doing
// client-authentication may set either Certificates or
// GetClientCertificate.
Certificates []Certificate
// NameToCertificate maps from a certificate name to an element of
// Certificates. Note that a certificate name can be of the form
// '*.example.com' and so doesn't have to be a domain name as such.
// See Config.BuildNameToCertificate
// The nil value causes the first element of Certificates to be used
// for all connections.
NameToCertificate map[string]*Certificate
// GetCertificate returns a Certificate based on the given
// ClientHelloInfo. It will only be called if the client supplies SNI
// information or if Certificates is empty.
//
// If GetCertificate is nil or returns nil, then the certificate is
// retrieved from NameToCertificate. If NameToCertificate is nil, the
// first element of Certificates will be used.
GetCertificate func(*ClientHelloInfo) (*Certificate, error)
// GetClientCertificate, if not nil, is called when a server requests a
// certificate from a client. If set, the contents of Certificates will
// be ignored.
//
// If GetClientCertificate returns an error, the handshake will be
// aborted and that error will be returned. Otherwise
// GetClientCertificate must return a non-nil Certificate. If
// Certificate.Certificate is empty then no certificate will be sent to
// the server. If this is unacceptable to the server then it may abort
// the handshake.
//
// GetClientCertificate may be called multiple times for the same
// connection if renegotiation occurs or if TLS 1.3 is in use.
GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error)
// GetConfigForClient, if not nil, is called after a ClientHello is
// received from a client. It may return a non-nil Config in order to
// change the Config that will be used to handle this connection. If
// the returned Config is nil, the original Config will be used. The
// Config returned by this callback may not be subsequently modified.
//
// If GetConfigForClient is nil, the Config passed to Server() will be
// used for all connections.
//
// Uniquely for the fields in the returned Config, session ticket keys
// will be duplicated from the original Config if not set.
// Specifically, if SetSessionTicketKeys was called on the original
// config but not on the returned config then the ticket keys from the
// original config will be copied into the new config before use.
// Otherwise, if SessionTicketKey was set in the original config but
// not in the returned config then it will be copied into the returned
// config before use. If neither of those cases applies then the key
// material from the returned config will be used for session tickets.
GetConfigForClient func(*ClientHelloInfo) (*Config, error)
// VerifyPeerCertificate, if not nil, is called after normal
// certificate verification by either a TLS client or server. It
// receives the raw ASN.1 certificates provided by the peer and also
// any verified chains that normal processing found. If it returns a
// non-nil error, the handshake is aborted and that error results.
//
// If normal verification fails then the handshake will abort before
// considering this callback. If normal verification is disabled by
// setting InsecureSkipVerify, or (for a server) when ClientAuth is
// RequestClientCert or RequireAnyClientCert, then this callback will
// be considered but the verifiedChains argument will always be nil.
VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
// RootCAs defines the set of root certificate authorities
// that clients use when verifying server certificates.
// If RootCAs is nil, TLS uses the host's root CA set.
RootCAs *x509.CertPool
// NextProtos is a list of supported, application level protocols.
NextProtos []string
// ServerName is used to verify the hostname on the returned
// certificates unless InsecureSkipVerify is given. It is also included
// in the client's handshake to support virtual hosting unless it is
// an IP address.
ServerName string
// ClientAuth determines the server's policy for
// TLS Client Authentication. The default is NoClientCert.
ClientAuth ClientAuthType
// ClientCAs defines the set of root certificate authorities
// that servers use if required to verify a client certificate
// by the policy in ClientAuth.
ClientCAs *x509.CertPool
// InsecureSkipVerify controls whether a client verifies the
// server's certificate chain and host name.
// If InsecureSkipVerify is true, TLS accepts any certificate
// presented by the server and any host name in that certificate.
// In this mode, TLS is susceptible to man-in-the-middle attacks.
// This should be used only for testing.
InsecureSkipVerify bool
// CipherSuites is a list of supported cipher suites. If CipherSuites
// is nil, TLS uses a list of suites supported by the implementation.
CipherSuites []uint16
// PreferServerCipherSuites controls whether the server selects the
// client's most preferred ciphersuite, or the server's most preferred
// ciphersuite. If true then the server's preference, as expressed in
// the order of elements in CipherSuites, is used.
PreferServerCipherSuites bool
// SessionTicketsDisabled may be set to true to disable session ticket
// (resumption) support. Note that on clients, session ticket support is
// also disabled if ClientSessionCache is nil.
SessionTicketsDisabled bool
// SessionTicketKey is used by TLS servers to provide session
// resumption. See RFC 5077. If zero, it will be filled with
// random data before the first server handshake.
//
// If multiple servers are terminating connections for the same host
// they should all have the same SessionTicketKey. If the
// SessionTicketKey leaks, previously recorded and future TLS
// connections using that key are compromised.
SessionTicketKey [32]byte
// ClientSessionCache is a cache of ClientSessionState entries for TLS
// session resumption. It is only used by clients.
ClientSessionCache ClientSessionCache
// MinVersion contains the minimum SSL/TLS version that is acceptable.
// If zero, then TLS 1.0 is taken as the minimum.
MinVersion uint16
// MaxVersion contains the maximum SSL/TLS version that is acceptable.
// If zero, then the maximum version supported by this package is used,
// which is currently TLS 1.2.
MaxVersion uint16
// CurvePreferences contains the elliptic curves that will be used in
// an ECDHE handshake, in preference order. If empty, the default will
// be used.
CurvePreferences []CurveID
// DynamicRecordSizingDisabled disables adaptive sizing of TLS records.
// When true, the largest possible TLS record size is always used. When
// false, the size of TLS records may be adjusted in an attempt to
// improve latency.
DynamicRecordSizingDisabled bool
// Renegotiation controls what types of renegotiation are supported.
// The default, none, is correct for the vast majority of applications.
Renegotiation RenegotiationSupport
// KeyLogWriter optionally specifies a destination for TLS master secrets
// in NSS key log format that can be used to allow external programs
// such as Wireshark to decrypt TLS connections.
// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
// Use of KeyLogWriter compromises security and should only be
// used for debugging.
KeyLogWriter io.Writer
serverInitOnce sync.Once // guards calling (*Config).serverInit
// mutex protects sessionTicketKeys.
mutex sync.RWMutex
// sessionTicketKeys contains zero or more ticket keys. If the length
// is zero, SessionTicketsDisabled must be true. The first key is used
// for new tickets and any subsequent keys can be used to decrypt old
// tickets.
sessionTicketKeys []ticketKey
}
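// Illustrative sketch (added example, not part of the original file): a minimal
// client-side Config built only from fields declared above; "example.com" is a
// placeholder server name.
//
//	cfg := &Config{
//		ServerName:         "example.com",
//		MinVersion:         VersionTLS12,
//		ClientSessionCache: NewLRUClientSessionCache(64),
//	}
//	_ = cfg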
// ticketKeyNameLen is the number of bytes of identifier that is prepended to
// an encrypted session ticket in order to identify the key used to encrypt it.
const ticketKeyNameLen = 16
// ticketKey is the internal representation of a session ticket key.
type ticketKey struct {
// keyName is an opaque byte string that serves to identify the session
// ticket key. It's exposed as plaintext in every session ticket.
keyName [ticketKeyNameLen]byte
aesKey [16]byte
hmacKey [16]byte
}
// ticketKeyFromBytes converts from the external representation of a session
// ticket key to a ticketKey. Externally, session ticket keys are 32 random
// bytes and this function expands that into sufficient name and key material.
func ticketKeyFromBytes(b [32]byte) (key ticketKey) {
hashed := sha512.Sum512(b[:])
copy(key.keyName[:], hashed[:ticketKeyNameLen])
copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16])
copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32])
return key
}
// Clone returns a shallow clone of c. It is safe to clone a Config that is
// being used concurrently by a TLS client or server.
func (c *Config) Clone() *Config {
// Running serverInit ensures that it's safe to read
// SessionTicketsDisabled.
c.serverInitOnce.Do(func() { c.serverInit(nil) })
var sessionTicketKeys []ticketKey
c.mutex.RLock()
sessionTicketKeys = c.sessionTicketKeys
c.mutex.RUnlock()
return &Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
GetClientCertificate: c.GetClientCertificate,
GetConfigForClient: c.GetConfigForClient,
VerifyPeerCertificate: c.VerifyPeerCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
KeyLogWriter: c.KeyLogWriter,
sessionTicketKeys: sessionTicketKeys,
}
}
// serverInit is run under c.serverInitOnce to do initialization of c. If c was
// returned by a GetConfigForClient callback then the argument should be the
// Config that was passed to Server, otherwise it should be nil.
func (c *Config) serverInit(originalConfig *Config) {
if c.SessionTicketsDisabled || len(c.ticketKeys()) != 0 {
return
}
alreadySet := false
for _, b := range c.SessionTicketKey {
if b != 0 {
alreadySet = true
break
}
}
if !alreadySet {
if originalConfig != nil {
copy(c.SessionTicketKey[:], originalConfig.SessionTicketKey[:])
} else if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil {
c.SessionTicketsDisabled = true
return
}
}
if originalConfig != nil {
originalConfig.mutex.RLock()
c.sessionTicketKeys = originalConfig.sessionTicketKeys
originalConfig.mutex.RUnlock()
} else {
c.sessionTicketKeys = []ticketKey{ticketKeyFromBytes(c.SessionTicketKey)}
}
}
func (c *Config) ticketKeys() []ticketKey {
c.mutex.RLock()
// c.sessionTicketKeys is constant once created. SetSessionTicketKeys
// will only update it by replacing it with a new value.
ret := c.sessionTicketKeys
c.mutex.RUnlock()
return ret
}
// SetSessionTicketKeys updates the session ticket keys for a server. The first
// key will be used when creating new tickets, while all keys can be used for
// decrypting tickets. It is safe to call this function while the server is
// running in order to rotate the session ticket keys. The function will panic
// if keys is empty.
func (c *Config) SetSessionTicketKeys(keys [][32]byte) {
if len(keys) == 0 {
panic("tls: keys must have at least one key")
}
newKeys := make([]ticketKey, len(keys))
for i, bytes := range keys {
newKeys[i] = ticketKeyFromBytes(bytes)
}
c.mutex.Lock()
c.sessionTicketKeys = newKeys
c.mutex.Unlock()
}
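// Illustrative rotation sketch (added example, not part of the original file): the new
// key goes first so it encrypts fresh tickets, while the old key is kept so previously
// issued tickets can still be decrypted. newKey and oldKey are placeholder [32]byte values.
//
//	cfg.SetSessionTicketKeys([][32]byte{newKey, oldKey})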
func (c *Config) rand() io.Reader {
r := c.Rand
if r == nil {
return rand.Reader
}
return r
}
func (c *Config) time() time.Time {
t := c.Time
if t == nil {
t = time.Now
}
return t()
}
func (c *Config) cipherSuites() []uint16 {
s := c.CipherSuites
if s == nil {
s = defaultCipherSuites()
}
return s
}
func (c *Config) minVersion() uint16 {
if c == nil || c.MinVersion == 0 {
return minVersion
}
return c.MinVersion
}
func (c *Config) maxVersion() uint16 {
if c == nil || c.MaxVersion == 0 {
return maxVersion
}
return c.MaxVersion
}
var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521 }
func (c *Config) curvePreferences() []CurveID {
if c == nil || len(c.CurvePreferences) == 0 {
return defaultCurvePreferences
}
return c.CurvePreferences
}
// mutualVersion returns the protocol version to use given the advertised
// version of the peer.
func (c *Config) mutualVersion(vers uint16) (uint16, bool) {
minVersion := c.minVersion()
maxVersion := c.maxVersion()
if vers < minVersion {
return 0, false
}
if vers > maxVersion {
vers = maxVersion
}
return vers, true
}
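// Worked example (added sketch, not part of the original file): for a Config c with zero
// MinVersion/MaxVersion, minVersion() is VersionTLS10 and maxVersion() is VersionTLS12, so:
//
//	v, ok := c.mutualVersion(0x0304)      // TLS 1.3 offered: v == VersionTLS12, ok == true (clamped)
//	v, ok = c.mutualVersion(VersionSSL30) // below the minimum: v == 0, ok == false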
// getCertificate returns the best certificate for the given ClientHelloInfo,
// defaulting to the first element of c.Certificates.
func (c *Config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) {
if c.GetCertificate != nil &&
(len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) {
cert, err := c.GetCertificate(clientHello)
if cert != nil || err != nil {
return cert, err
}
}
if len(c.Certificates) == 0 {
return nil, errors.New("tls: no certificates configured")
}
if len(c.Certificates) == 1 || c.NameToCertificate == nil {
// There's only one choice, so no point doing any work.
return &c.Certificates[0], nil
}
name := strings.ToLower(clientHello.ServerName)
for len(name) > 0 && name[len(name)-1] == '.' {
name = name[:len(name)-1]
}
if cert, ok := c.NameToCertificate[name]; ok {
return cert, nil
}
// try replacing labels in the name with wildcards until we get a
// match.
labels := strings.Split(name, ".")
for i := range labels {
labels[i] = "*"
candidate := strings.Join(labels, ".")
if cert, ok := c.NameToCertificate[candidate]; ok {
return cert, nil
}
}
// If nothing matches, return the first certificate.
return &c.Certificates[0], nil
}
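// Worked example (added note, not part of the original file): for ServerName
// "www.example.com" the lookup above tries the exact name first, then replaces labels
// from left to right with wildcards, probing "*.example.com", "*.*.com" and "*.*.*"
// before falling back to the first certificate.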
// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate
// from the CommonName and SubjectAlternateName fields of each of the leaf
// certificates.
func (c *Config) BuildNameToCertificate() {
c.NameToCertificate = make(map[string]*Certificate)
for i := range c.Certificates {
cert := &c.Certificates[i]
x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
continue
}
if len(x509Cert.Subject.CommonName) > 0 {
c.NameToCertificate[x509Cert.Subject.CommonName] = cert
}
for _, san := range x509Cert.DNSNames {
c.NameToCertificate[san] = cert
}
}
}
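// Illustrative sketch (added example, not part of the original file; cert is a
// placeholder Certificate value):
//
//	cfg := &Config{Certificates: []Certificate{cert}}
//	cfg.BuildNameToCertificate()
//	// cfg.NameToCertificate now maps the leaf's CommonName and DNS SANs to that certificate.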
// writeKeyLog logs client random and master secret if logging was enabled by
// setting c.KeyLogWriter.
func (c *Config) writeKeyLog(clientRandom, masterSecret []byte) error {
if c.KeyLogWriter == nil {
return nil
}
logLine := []byte(fmt.Sprintf("CLIENT_RANDOM %x %x\n", clientRandom, masterSecret))
writerMutex.Lock()
_, err := c.KeyLogWriter.Write(logLine)
writerMutex.Unlock()
return err
}
// writerMutex protects all KeyLogWriters globally. It is rarely enabled,
// and is only for debugging, so a global mutex saves space.
var writerMutex sync.Mutex
// A Certificate is a chain of one or more certificates, leaf first.
type Certificate struct {
Certificate [][]byte
// PrivateKey contains the private key corresponding to the public key
// in Leaf. For a server, this must implement crypto.Signer and/or
// crypto.Decrypter, with an RSA or ECDSA PublicKey. For a client
// (performing client authentication), this must be a crypto.Signer
// with an RSA or ECDSA PublicKey.
PrivateKey crypto.PrivateKey
// OCSPStaple contains an optional OCSP response which will be served
// to clients that request it.
OCSPStaple []byte
// SignedCertificateTimestamps contains an optional list of Signed
// Certificate Timestamps which will be served to clients that request it.
SignedCertificateTimestamps [][]byte
// Leaf is the parsed form of the leaf certificate, which may be
// initialized using x509.ParseCertificate to reduce per-handshake
// processing for TLS clients doing client authentication. If nil, the
// leaf certificate will be parsed as needed.
Leaf *x509.Certificate
}
type handshakeMessage interface {
marshal() []byte
unmarshal([]byte) bool
}
// lruSessionCache is a ClientSessionCache implementation that uses an LRU
// caching strategy.
type lruSessionCache struct {
sync.Mutex
m map[string]*list.Element
q *list.List
capacity int
}
type lruSessionCacheEntry struct {
sessionKey string
state *ClientSessionState
}
// NewLRUClientSessionCache returns a ClientSessionCache with the given
// capacity that uses an LRU strategy. If capacity is < 1, a default capacity
// is used instead.
func NewLRUClientSessionCache(capacity int) ClientSessionCache {
const defaultSessionCacheCapacity = 64
if capacity < 1 {
capacity = defaultSessionCacheCapacity
}
return &lruSessionCache{
m: make(map[string]*list.Element),
q: list.New(),
capacity: capacity,
}
}
// Put adds the provided (sessionKey, cs) pair to the cache.
func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) {
c.Lock()
defer c.Unlock()
if elem, ok := c.m[sessionKey]; ok {
entry := elem.Value.(*lruSessionCacheEntry)
entry.state = cs
c.q.MoveToFront(elem)
return
}
if c.q.Len() < c.capacity {
entry := &lruSessionCacheEntry{sessionKey, cs}
c.m[sessionKey] = c.q.PushFront(entry)
return
}
elem := c.q.Back()
entry := elem.Value.(*lruSessionCacheEntry)
delete(c.m, entry.sessionKey)
entry.sessionKey = sessionKey
entry.state = cs
c.q.MoveToFront(elem)
c.m[sessionKey] = elem
}
// Get returns the ClientSessionState value associated with a given key. It
// returns (nil, false) if no value is found.
func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) {
c.Lock()
defer c.Unlock()
if elem, ok := c.m[sessionKey]; ok {
c.q.MoveToFront(elem)
return elem.Value.(*lruSessionCacheEntry).state, true
}
return nil, false
}
// TODO(jsing): Make these available to both crypto/x509 and crypto/tls.
type dsaSignature struct {
R, S *big.Int
}
type ecdsaSignature dsaSignature
var emptyConfig Config
func defaultConfig() *Config {
return &emptyConfig
}
var (
once sync.Once
varDefaultCipherSuites []uint16
)
func defaultCipherSuites() []uint16 {
once.Do(initDefaultCipherSuites)
return varDefaultCipherSuites
}
func initDefaultCipherSuites() {
var topCipherSuites []uint16
//
//// Check the cpu flags for each platform that has optimized GCM implementations.
//// Worst case, these variables will just all be false
//hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
//
//hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
//
//// Keep in sync with crypto/aes/cipher_s390x.go.
//hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
//
//hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
//
//if hasGCMAsm {
// // If AES-GCM hardware is provided then prioritise AES-GCM
// // cipher suites.
// topCipherSuites = []uint16{
// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
// TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
// TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
// TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
// TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
// TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
// }
//} else {
// Without AES-GCM hardware, we put the ChaCha20-Poly1305
// cipher suites first.
topCipherSuites = []uint16{
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
// ----------------------------------------------- //
TLS_ECDHE_SM2_WITH_SM4_128_CBC_SM3,
// }
}
varDefaultCipherSuites = make([]uint16, 0, len(cipherSuites))
varDefaultCipherSuites = append(varDefaultCipherSuites, topCipherSuites...)
NextCipherSuite:
for _, suite := range cipherSuites {
if suite.flags&suiteDefaultOff != 0 {
continue
}
for _, existing := range varDefaultCipherSuites {
if existing == suite.id {
continue NextCipherSuite
}
}
varDefaultCipherSuites = append(varDefaultCipherSuites, suite.id)
}
}
func unexpectedMessageError(wanted, got interface{}) error |
func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool {
for _, s := range supportedSignatureAlgorithms {
if s == sigAlg {
return true
}
}
return false
}
// signatureFromSignatureScheme maps a signature algorithm to the underlying
// signature method (without hash function).
func signatureFromSignatureScheme(signatureAlgorithm SignatureScheme) uint8 {
switch signatureAlgorithm {
case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512:
return signaturePKCS1v15
case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512:
return signatureRSAPSS
case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512:
return signatureECDSA
// ------------------------------------------------ //
case SM2WithSM3:
return signatureSM2
default:
return 0
}
}
| {
return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted)
} |
util.go | /*
Copyright 2021 Teodor Spæren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"cloud.google.com/go/pubsub"
)
type ctxKey string
var ctxKeyPubsubClient = ctxKey("pubsub")
func setPubsubClient(ctx context.Context, client *pubsub.Client) context.Context {
return context.WithValue(ctx, ctxKeyPubsubClient, client)
}
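// Illustrative sketch (added example, not part of the original file): attach the client
// once near startup, then retrieve it further down the call chain.
//
//	ctx = setPubsubClient(ctx, client)
//	psc := getPubsubClient(ctx)
//	_ = psc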
func getPubsubClient(ctx context.Context) *pubsub.Client {
client, ok := ctx.Value(ctxKeyPubsubClient).(*pubsub.Client)
if !ok { | return client
}
|
panic("cannot get client")
}
|
global.js | var objToStick = $(".home-properties"); // Get the element we need
if (objToStick.length) {
var topOfObjToStick = objToStick.offset().top; // Get the block's initial position on the page
$(window).scroll(function () {
var windowScroll = $(window).scrollTop(); // Get how far the window has been scrolled
if (windowScroll > topOfObjToStick) { // If we have scrolled past the block, stick the call form
$('.home-form-call').addClass("showhomeCall");
} else {
$('.home-form-call').removeClass("showhomeCall");
};
});
}
var saleSelectedCountryId = 0;
var rentSelectedCountryId = 0;
function checkSaleSelectedCountry(id) {
var slider = $('#price-slider');
var sliderPound = $('#price-slider-pound');
if (id == 1) {
slider.hide();
sliderPound.show();
} else {
slider.show();
sliderPound.hide();
}
}
function checkRentSelectedCountry(id) {
var sliderPerWeek = $('#price-slider-rent-per-week');
var sliderPerMonth = $('#price-slider-rent-per-month');
var sliderPerWeekPound = $('#price-slider-rent-per-week-pound');
var sliderPerMonthPound = $('#price-slider-rent-per-month-pound');
if (id == 1) {
sliderPerWeek.hide();
sliderPerMonth.hide();
sliderPerWeekPound.show();
sliderPerMonthPound.show();
} else {
sliderPerWeek.show();
sliderPerMonth.show();
sliderPerWeekPound.hide();
sliderPerMonthPound.hide();
}
}
jQuery(document).ready(function($) {
"use strict";
$('.property-content').matchHeight();
saleSelectedCountryId = $('#search_sale-country') ? $('#search_sale-country').val() : 0;
rentSelectedCountryId = $('#search_rent-country') ? $('#search_rent-country').val() : 0;
checkSaleSelectedCountry(saleSelectedCountryId);
checkRentSelectedCountry(rentSelectedCountryId);
$('body').on('change', '#search_sale-country', function () {
saleSelectedCountryId = $('#search_sale-country').val();
checkSaleSelectedCountry(saleSelectedCountryId);
// console.log(saleSelectedCountryId, rentSelectedCountryId);
});
$('body').on('change', '#search_rent-country', function () {
rentSelectedCountryId = $('#search_rent-country').val();
checkRentSelectedCountry(rentSelectedCountryId);
// console.log(saleSelectedCountryId, rentSelectedCountryId);
});
// console.log(saleSelectedCountryId, rentSelectedCountryId);
/***************************************************************************/
//MAIN MENU SUB MENU TOGGLE
/***************************************************************************/
$('.nav.navbar-nav > li.menu-item-has-children > a').on('click', function(event){
event.preventDefault();
$(this).parent().find('.sub-menu').toggle();
$(this).parent().find('.sub-menu li .sub-menu').hide();
});
$('.nav.navbar-nav li .sub-menu li.menu-item-has-children > a ').on('click', function(event){
event.preventDefault();
$(this).parent().find('.sub-menu').toggle();
});
/***************************************************************************/
//TABS
/***************************************************************************/
$( function() {
$( ".tabs" ).tabs({
create: function(event, ui) {
$(this).fadeIn();
}
});
});
/***************************************************************************/
//ACTIVATE CHOSEN
/***************************************************************************/
$("select").chosen({disable_search_threshold: 11});
/***************************************************************************/
//ACCORDIONS
/***************************************************************************/
$(function() {
$( "#accordion" ).accordion({
heightStyle: "content",
closedSign: '<i class="fa fa-minus"></i>',
openedSign: '<i class="fa fa-plus"></i>'
});
});
/***************************************************************************/
//SLICK SLIDER - SIMPLE SLIDER
/***************************************************************************/
$('.slider.slider-simple').slick({
prevArrow: $('.slider-nav-simple-slider .slider-prev'),
nextArrow: $('.slider-nav-simple-slider .slider-next'),
adaptiveHeight: true,
autoplay: true,
autoplaySpeed: 2000,
fade: true
});
/***************************************************************************/
//SLICK SLIDER - FEATURED PROPERTIES
/***************************************************************************/
$('.slider.slider-featured').slick({
prevArrow: $('.slider-nav-properties-featured .slider-prev'),
nextArrow: $('.slider-nav-properties-featured .slider-next'),
slidesToShow: 4,
slidesToScroll: 1,
responsive: [
{
breakpoint: 990,
settings: {
slidesToShow: 3,
slidesToScroll: 1
}
},
{
breakpoint: 767,
settings: {
slidesToShow: 2,
slidesToScroll: 1
}
},
{
breakpoint: 589,
settings: {
slidesToShow: 1,
slidesToScroll: 1
}
}
]
});
/***************************************************************************/
//SLICK SLIDER - TESTIMONIALS
/***************************************************************************/
$('.slider.slider-testimonials').slick({
prevArrow: $('.slider-nav-testimonials .slider-prev'),
nextArrow: $('.slider-nav-testimonials .slider-next'),
adaptiveHeight: true
});
/***************************************************************************/
//SLICK SLIDER - PROPERTY GALLERY
/***************************************************************************/
$('.slider.slider-property-gallery').slick({
slidesToShow: 1,
slidesToScroll: 1,
adaptiveHeight: true,
arrows: true,
fade: true,
infinite:false,
asNavFor: '.property-gallery-pager',
prevArrow: $('.slider-nav-property-gallery .slider-prev'),
nextArrow: $('.slider-nav-property-gallery .slider-next'),
});
$('.property-gallery-pager').on('init', function(event, slick){
$('.slide-counter').text('1 / ' + slick.slideCount);
});
$('.property-gallery-pager').slick({
prevArrow: $('.slider-nav-property-gallery .slider-prev'),
nextArrow: $('.slider-nav-property-gallery .slider-next'),
slidesToShow: 5,
slidesToScroll: 1,
asNavFor: '.slider.slider-property-gallery',
dots: false,
focusOnSelect: true,
infinite:false
});
$('.property-gallery-pager').on('afterChange', function(event, slick, currentSlide, nextSlide){
currentSlide = currentSlide + 1;
var counter = currentSlide + ' / ' + slick.slideCount;
$('.slide-counter').text(counter);
});
//INITIATE SLIDES
$('.slide').addClass('initialized');
/***************************************************************************/
//FIXED HEADER
/***************************************************************************/
var navToggle = $('.header-default .navbar-toggle');
var mainMenuWrap = $('.header-default .main-menu-wrap');
if ($(window).scrollTop() > 140) {
navToggle.addClass('fixed');
mainMenuWrap.addClass('fixed');
}
$(window).bind('scroll', function () {
if ($(window).scrollTop() > 140) {
navToggle.addClass('fixed');
mainMenuWrap.addClass('fixed');
} else {
navToggle.removeClass('fixed');
mainMenuWrap.removeClass('fixed');
}
});
/***************************************************************************/
//INITIALIZE BLOG CREATIVE
/***************************************************************************/
$('.grid-blog').isotope({
itemSelector: '.col-lg-4'
});
/***************************************************************************/
//INITIALIZE PRICE RANGE SLIDER
/***************************************************************************/
// var sliders = document.getElementsByClassName('price-slider');
// var count = 0;
var saleMinPrice = parseInt($('[data-sale-min-price]').val());
var saleMaxPrice = parseInt($('[data-sale-max-price]').val());
var rentMinPricePerWeek = parseInt($('[data-rent-min-price-per-week]').val());
var rentMaxPricePerWeek = parseInt($('[data-rent-max-price-per-week]').val());
var rentMinPricePerMonth = parseInt($('[data-rent-min-price-per-month]').val());
var rentMaxPricePerMonth = parseInt($('[data-rent-max-price-per-month]').val());
| var saleMinPricePound = parseInt($('[data-sale-min-price-pound]').val());
var saleMaxPricePound = parseInt($('[data-sale-max-price-pound]').val());
var rentMinPricePerWeekPound = parseInt($('[data-rent-min-price-per-week-pound]').val());
var rentMaxPricePerWeekPound = parseInt($('[data-rent-max-price-per-week-pound]').val());
var rentMinPricePerMonthPound = parseInt($('[data-rent-min-price-per-month-pound]').val());
var rentMaxPricePerMonthPound = parseInt($('[data-rent-max-price-per-month-pound]').val());
var saleSliderPrice = document.getElementById('price-slider');
var saleSliderPricePound = document.getElementById('price-slider-pound');
if (saleSliderPrice) {
noUiSlider.create(saleSliderPrice, {
connect: true,
start: [saleMinPrice, saleMaxPrice],
// step: 1000,
margin: saleMinPrice == saleMaxPrice ? saleMinPrice : 0,
range: {
'min': [saleMinPrice],
'max': [saleMaxPrice]
},
tooltips: false,
format: wNumb({
decimals: 0,
thousand: ',',
prefix: '€',
})
}).set(saleMinPrice - 1, saleMaxPrice + 1);
$(saleSliderPrice).append("<div class='low-pr noUi-tooltip'>€" + saleMinPrice + "</div>");
$(saleSliderPrice).append("<div class='high-pr noUi-tooltip'>€" + saleMaxPrice + "</div>");
saleSliderPrice.noUiSlider.on('update', function ( values, handle ) {
$(saleSliderPrice).find('.low-pr').text(values[0]);
$(saleSliderPrice).find('.high-pr').text(values[1]);
});
}
if (saleSliderPricePound) {
noUiSlider.create(saleSliderPricePound, {
connect: true,
start: [saleMinPricePound, saleMaxPricePound],
// step: 1000,
margin: saleMinPricePound == saleMaxPricePound ? saleMinPricePound : 0,
range: {
'min': [saleMinPricePound],
'max': [saleMaxPricePound]
},
tooltips: false,
format: wNumb({
decimals: 0,
thousand: ',',
prefix: '₤',
})
}).set(saleMinPricePound - 1, saleMaxPricePound + 1);
$(saleSliderPricePound).append("<div class='low-pr noUi-tooltip'>₤" + saleMinPricePound + "</div>");
$(saleSliderPricePound).append("<div class='high-pr noUi-tooltip'>₤" + saleMaxPricePound + "</div>");
saleSliderPricePound.noUiSlider.on('update', function ( values, handle ) {
$(saleSliderPricePound).find('.low-pr').text(values[0]);
$(saleSliderPricePound).find('.high-pr').text(values[1]);
});
}
var sliderRentPerWeek = document.getElementById('price-slider-rent-per-week');
if (sliderRentPerWeek) {
noUiSlider.create(sliderRentPerWeek, {
connect: true,
start: [rentMinPricePerWeek, rentMaxPricePerWeek],
// step: 10,
margin: rentMinPricePerWeek == rentMaxPricePerWeek ? rentMinPricePerWeek : 0,
range: {
'min': [rentMinPricePerWeek],
'max': [rentMaxPricePerWeek]
},
tooltips: false,
format: wNumb({
decimals: 0,
thousand: ',',
prefix: '€',
})
}).set(rentMinPricePerWeek - 1, rentMaxPricePerWeek + 1);
$(sliderRentPerWeek).append("<div class='low-pr noUi-tooltip'>€" + rentMinPricePerWeek + "</div>");
$(sliderRentPerWeek).append("<div class='high-pr noUi-tooltip'>€" + rentMaxPricePerWeek + "</div>");
sliderRentPerWeek.noUiSlider.on('update', function ( values, handle ) {
$(sliderRentPerWeek).find('.low-pr').text(values[0]);
$(sliderRentPerWeek).find('.high-pr').text(values[1]);
});
}
var sliderRentPerMonth = document.getElementById('price-slider-rent-per-month');
if (sliderRentPerMonth) {
noUiSlider.create(sliderRentPerMonth, {
connect: true,
start: [rentMinPricePerMonth, rentMaxPricePerMonth],
// step: 10,
margin: rentMinPricePerMonth == rentMaxPricePerMonth ? rentMinPricePerMonth : 0,
range: {
'min': [rentMinPricePerMonth],
'max': [rentMaxPricePerMonth]
},
tooltips: false,
format: wNumb({
decimals: 0,
thousand: ',',
prefix: '€',
})
}).set(rentMinPricePerMonth - 1, rentMaxPricePerMonth + 1);
$(sliderRentPerMonth).append("<div class='low-pr noUi-tooltip'>€" + rentMinPricePerMonth + "</div>");
$(sliderRentPerMonth).append("<div class='high-pr noUi-tooltip'>€" + rentMaxPricePerMonth + "</div>");
sliderRentPerMonth.noUiSlider.on('update', function ( values, handle ) {
$(sliderRentPerMonth).find('.low-pr').text(values[0]);
$(sliderRentPerMonth).find('.high-pr').text(values[1]);
});
}
var sliderRentPerWeekPound = document.getElementById('price-slider-rent-per-week-pound');
if (sliderRentPerWeekPound) {
noUiSlider.create(sliderRentPerWeekPound, {
connect: true,
start: [rentMinPricePerWeekPound, rentMaxPricePerWeekPound],
// step: 10,
margin: rentMinPricePerWeekPound == rentMaxPricePerWeekPound ? rentMinPricePerWeekPound : 0,
range: {
'min': [rentMinPricePerWeekPound],
'max': [rentMaxPricePerWeekPound]
},
tooltips: false,
format: wNumb({
decimals: 0,
thousand: ',',
prefix: '₤',
})
}).set(rentMinPricePerWeekPound - 1, rentMaxPricePerWeekPound + 1);
$(sliderRentPerWeekPound).append("<div class='low-pr noUi-tooltip'>₤" + rentMinPricePerWeekPound + "</div>");
$(sliderRentPerWeekPound).append("<div class='high-pr noUi-tooltip'>₤" + rentMaxPricePerWeekPound + "</div>");
sliderRentPerWeekPound.noUiSlider.on('update', function ( values, handle ) {
$(sliderRentPerWeekPound).find('.low-pr').text(values[0]);
$(sliderRentPerWeekPound).find('.high-pr').text(values[1]);
});
}
var sliderRentPerMonthPound = document.getElementById('price-slider-rent-per-month-pound');
if (sliderRentPerMonthPound) {
noUiSlider.create(sliderRentPerMonthPound, {
connect: true,
start: [rentMinPricePerMonthPound, rentMaxPricePerMonthPound],
// step: 10,
range: {
'min': [rentMinPricePerMonthPound],
'max': [rentMaxPricePerMonthPound]
},
margin: rentMinPricePerMonthPound == rentMaxPricePerMonthPound ? rentMinPricePerMonthPound : 0,
tooltips: false,
format: wNumb({
decimals: 0,
thousand: ',',
prefix: '₤',
})
}).set(rentMinPricePerMonthPound - 1, rentMaxPricePerMonthPound + 1);
$(sliderRentPerMonthPound).append("<div class='low-pr noUi-tooltip'>₤" + rentMinPricePerMonthPound + "</div>");
$(sliderRentPerMonthPound).append("<div class='high-pr noUi-tooltip'>₤" + rentMaxPricePerMonthPound + "</div>");
sliderRentPerMonthPound.noUiSlider.on('update', function ( values, handle ) {
$(sliderRentPerMonthPound).find('.low-pr').text(values[0]);
$(sliderRentPerMonthPound).find('.high-pr').text(values[1]);
});
}
/***************************************************************************/
//FILTER TOGGLE (ON GOOGLE MAPS)
/***************************************************************************/
$('.filter-toggle').on('click', function() {
$(this).parent().find('form').stop(true, true).slideToggle();
});
/***************************************************************************/
//MULTIPAGE FORM
/***************************************************************************/
$('.multi-page-form .form-next').on('click', function() {
//validate required fields
var errors = [];
$('.multi-page-form').find('.error').remove();
$( ".multi-page-form .multi-page-form-content.active input.required" ).each(function( index ) {
if(!$(this).val()) {
$(this).parent().find('label').append('<span class="error"> This field is required</span>');
errors.push(index);
}
});
//if no errors
if (errors.length === 0) {
var active = $(this).parent().parent().find('.multi-page-form-content.active');
$(this).parent().parent().find('.form-nav .form-nav-item.completed').next().addClass('completed');
$(this).parent().parent().find('.form-nav .form-nav-item.completed span').html('<i class="fa fa-check"></i>');
active.removeClass('active');
active.next().addClass('active');
}
});
$('.multi-page-form .form-prev').on('click', function() {
var active = $(this).parent().parent().find('.multi-page-form-content.active');
var lastCompleted = $(this).parent().parent().find('.form-nav .form-nav-item.completed').last();
lastCompleted.removeClass('completed');
lastCompleted.find('span').html(lastCompleted.index() + 1);
if(active.prev('.multi-page-form-content').prev('.multi-page-form-content').length > 0) {
active.removeClass('active');
active.prev().addClass('active');
}
else {
active.removeClass('active');
active.prev().addClass('active');
$(this).addClass('show-none');
$(this).parent().find('.disabled').show();
}
});
/******************************************************************************/
/** SUBMIT PROPERTY - ADDITIONAL IMAGES **/
/******************************************************************************/
var files_count = $('.additional-img-container .additional-image').length + 1;
$('.add-additional-img').on('click', function() {
files_count++;
$('.additional-img-container').append('<table><tr><td><div class="media-uploader-additional-img"><input type="file" class="additional_img" name="additional_img'+ files_count +'" value="" /><span class="delete-additional-img appended right"><i class="fa fa-trash"></i> Delete</span></div></td></tr></table>');
});
$('.additional-img-container').on("click", ".delete-additional-img", function() {
$(this).closest('table').remove();
});
/******************************************************************************/
/** SUBMIT PROPERTY - OWNER INFO **/
/******************************************************************************/
$('#owner-info input[type="radio"]').on('click', function() {
var input = $(this).val();
$('#owner-info .form-block-agent-options').hide();
if(input === 'agent') {
$('#owner-info .form-block-select-agent').slideDown('fast');
}
if(input === 'custom') {
$('#owner-info .form-block-custom-agent').slideDown('fast');
}
});
/***************************************************************************/
//AJAX CONTACT FORM
/***************************************************************************/
$(document).on('submit', 'form#contact-us', function() {
$('form#contact-us .error').remove();
var hasError = false;
$('.requiredField').each(function() {
if($.trim($(this).val()) === '') {
$(this).parent().find('label').append('<span class="error">This field is required!</span>');
$(this).addClass('inputError');
hasError = true;
} else if($(this).hasClass('email')) {
var emailReg = /^([\w-\.]+@([\w-]+\.)+[\w-]{2,4})?$/;
if(!emailReg.test($.trim($(this).val()))) {
$(this).parent().find('label').append('<span class="error">Sorry! You\'ve entered an invalid email.</span>');
$(this).addClass('inputError');
hasError = true;
}
}
});
if(!hasError) {
var formInput = $(this).serialize();
$.post($(this).attr('action'),formInput, function(data){
$('form#contact-us').slideUp("fast", function() {
$(this).before('<p class="alert-box success"><i class="fa fa-check icon"></i><strong>Thanks!</strong> Your email has been delivered!</p>');
});
});
}
return false;
});
});
/* HEADER PHONE / MAIL LINKS */
$( document ).ready(function() {
var win = $(window);
var mobileBtn = $('.mobile-phone');
var mailBtn = $('.mobile-mail');
var mobileText = $('.mobile-phone-text');
var mailText = $('.mail-phone-text');
var added = false;
function addMMEvents(){
if (!added){
mobileBtn.on('click', toggleMobile);
mailBtn.on('click', toggleMail);
added = true;
}
}
function removeMMEvents(){
if (added){
mobileBtn.off('click', toggleMobile);
mailBtn.off('click', toggleMail);
added = false;
}
}
function checkWidth(){
if (window.innerWidth > 991){
removeMMEvents();
showAll();
}else{
addMMEvents();
hideAll();
}
}
function hideAll(){
mobileText.hide();
mailText.hide();
}
function showAll(){
mobileText.show();
mailText.show();
}
function toggleMobile(){
mailText.hide();
mobileText.toggle();
}
function toggleMail(){
mobileText.hide();
mailText.toggle();
}
checkWidth();
win.on('resize', checkWidth);
});
// $( ".noUi-handle-lower .noUi-tooltip" ).change(function() {
// console.log(1)
// });
// $('.noUi-handle-lower .noUi-tooltip').bind("DOMSubtreeModified",function(){
// console.log(2)
// });
// $('.noUi-handle-lower .noUi-tooltip').bind("DOMSubtreeModified",function(){
// console.log(3)
// })
// jQuery(".noUi-handle-lower .noUi-tooltip").bind( 'DOMSubtreeModified',function(){ // отслеживаем изменение содержимого блока 2
// console.log(4)
// });
// var div = $('#slider-price-sale .noUi-handle-lower');
// div.on('click', function () {
// console.log(12311231321)
// console.log(this.innerHTML)
// })
// div.onchange = function(e){
// alert(this.innerHTML);
// }; | |
app.module.ts | import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { ReactiveFormsModule, FormsModule } from '@angular/forms';
import { AppRoutingModule } from './app-routing.module';
import { AppComponent } from './app.component';
import { RegistrationFormComponent } from './components/registration-form/registration-form.component';
import { PageRegistrationComponent } from './components/page-registration/page-registration.component';
import { PageHomeComponent } from './components/page-home/page-home.component';
import { ListDataComponent } from './components/list-data/list-data.component';
import { DolarSignPipe } from './pipes/dolar-sign.pipe';
import { QuickFilterPipe } from './pipes/quick-filter.pipe';
import { ExportFileDirective } from './directives/export-file.directive';
import { AuthService } from './services/auth.service';
import { DataManagerService } from './services/data-manager.service';
@NgModule({
declarations: [
AppComponent, | DolarSignPipe,
QuickFilterPipe,
ExportFileDirective
],
imports: [
BrowserModule,
ReactiveFormsModule,
FormsModule,
AppRoutingModule
],
providers: [AuthService, DataManagerService],
bootstrap: [AppComponent]
})
export class AppModule { } | RegistrationFormComponent,
PageRegistrationComponent,
PageHomeComponent,
ListDataComponent, |
spinner-theme.component.ts | import { Component, OnInit } from '@angular/core';
@Component({
selector: 'spinner-theme',
templateUrl: './spinner-theme.component.html', | })
export class SpinnerThemeComponent implements OnInit {
constructor() { }
ngOnInit() {
}
} | styleUrls: ['./spinner-theme.component.scss'] |
influxdb_return.py | # -*- coding: utf-8 -*-
'''
Return data to an influxdb server.
.. versionadded:: 2015.8.0
To enable this returner the minion will need the python client for influxdb
installed and the following values configured in the minion or master
config, these are the defaults:
.. code-block:: yaml
influxdb.db: 'salt'
influxdb.user: 'salt'
influxdb.password: 'salt'
influxdb.host: 'localhost'
influxdb.port: 8086
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
alternative.influxdb.db: 'salt'
alternative.influxdb.user: 'salt'
alternative.influxdb.password: 'salt'
alternative.influxdb.host: 'localhost'
alternative.influxdb.port: 8086
To use the influxdb returner, append '--return influxdb' to the salt command.
.. code-block:: bash
salt '*' test.ping --return influxdb
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. code-block:: bash
salt '*' test.ping --return influxdb --return_config alternative
'''
from __future__ import absolute_import
# Import python libs
import json
import logging
# Import Salt libs
import salt.utils.jid
import salt.returners
# Import third party libs
try:
import influxdb.influxdb08
HAS_INFLUXDB = True
except ImportError:
HAS_INFLUXDB = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'influxdb'
def __virtual__():
if not HAS_INFLUXDB:
return False
return __virtualname__
def _get_options(ret=None):
'''
Get the influxdb options from salt.
'''
attrs = {'host': 'host',
'port': 'port',
'db': 'db',
'user': 'user',
'password': 'password'}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_serv(ret=None):
|
def returner(ret):
'''
Return data to an influxdb data store
'''
serv = _get_serv(ret)
req = [
{
'name': 'returns',
'columns': ['fun', 'id', 'jid', 'return', 'full_ret'],
'points': [
[ret['fun'], ret['id'], ret['jid'], json.dumps(ret['return']), json.dumps(ret)]
],
}
]
try:
serv.write_points(req)
except Exception as ex:
log.critical('Failed to store return with InfluxDB returner: {0}'.format(ex))
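# Illustrative only (not part of the original module): the minimal shape of
# the ``ret`` dict that ``returner()`` reads, based on the fields used above.
# The values are made-up placeholders.
#
#   example_ret = {
#       'fun': 'test.ping',
#       'id': 'minion-1',
#       'jid': '20200101000000000000',
#       'return': True,
#   }
#   returner(example_ret)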
def save_load(jid, load):
'''
Save the load to the specified jid
'''
serv = _get_serv(ret=None)
req = [
{
'name': 'jids',
'columns': ['jid', 'load'],
'points': [
[jid, json.dumps(load)]
],
}
]
try:
serv.write_points(req)
except Exception as ex:
log.critical('Failed to store load with InfluxDB returner: {0}'.format(ex))
def save_minions(jid, minions): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
serv = _get_serv(ret=None)
sql = "select load from jids where jid = '{0}'".format(jid)
log.debug(">> Now in get_load {0}".format(jid))
data = serv.query(sql)
log.debug(">> Now Data: {0}".format(data))
if data:
return data
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
serv = _get_serv(ret=None)
sql = "select id, full_ret from returns where jid = '{0}'".format(jid)
data = serv.query(sql)
ret = {}
if data:
points = data[0]['points']
for point in points:
ret[point[3]] = json.loads(point[2])
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
sql = '''select first(id) as fid, first(full_ret) as fret
from returns
where fun = '{0}'
group by fun, id
'''.format(fun)
data = serv.query(sql)
ret = {}
if data:
points = data[0]['points']
for point in points:
ret[point[1]] = json.loads(point[2])
return ret
def get_jids():
'''
Return a list of all job ids
'''
serv = _get_serv(ret=None)
sql = "select distinct(jid) from jids"
# [{u'points': [[0, u'saltdev']], u'name': u'returns', u'columns': [u'time', u'distinct']}]
data = serv.query(sql)
ret = []
if data:
for jid in data[0]['points']:
ret.append(jid[1])
return ret
def get_minions():
'''
Return a list of minions
'''
serv = _get_serv(ret=None)
sql = "select distinct(id) from returns"
data = serv.query(sql)
ret = []
if data:
for jid in data[0]['points']:
ret.append(jid[1])
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()
| '''
Return an influxdb client object
'''
_options = _get_options(ret)
host = _options.get('host')
port = _options.get('port')
database = _options.get('db')
user = _options.get('user')
password = _options.get('password')
return influxdb.influxdb08.InfluxDBClient(host=host,
port=port,
username=user,
password=password,
database=database) |
test_db_sqlite.py | from datetime import datetime
from dino.config import UserKeys, RedisKeys, SessionKeys
from dino.db.rdbms.models import Channels
from dino.db.rdbms.models import Rooms
from test.base import BaseTest
from test.db import BaseDatabaseTest
class DatabaseSqliteTest(BaseDatabaseTest):
def setUp(self):
self.set_up_env('sqlite')
def tearDown(self):
from dino.db.rdbms.dbman import Database
from dino.db.rdbms.dbman import DeclarativeBase
db = Database(self.env)
con = db.engine.connect()
trans = con.begin()
for table in reversed(DeclarativeBase.metadata.sorted_tables):
con.execute(table.delete())
trans.commit()
con.close()
self.env.cache._flushall()
def test_get_user_infos(self):
self.db.set_user_info(BaseTest.USER_ID, {SessionKeys.gender.value: 'm', 'last_login': datetime.utcnow()})
self.db.set_user_info(BaseTest.OTHER_USER_ID, {SessionKeys.gender.value: 'w', 'last_login': datetime.utcnow()})
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.USER_ID))
self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.OTHER_USER_ID))
infos = self.db.get_user_infos({BaseTest.USER_ID, BaseTest.OTHER_USER_ID})
self.assertEqual('m', infos[BaseTest.USER_ID][SessionKeys.gender.value])
self.assertEqual('w', infos[BaseTest.OTHER_USER_ID][SessionKeys.gender.value])
def test_set_two_owners_on_room(self):
self._test_set_two_owners_on_room()
def test_is_admin_before_create(self):
self._test_is_admin_before_create()
def test_is_admin_after_create(self):
self._test_is_admin_after_create()
def test_is_admin_after_create_set_admin(self):
self._test_is_admin_after_create_set_admin()
def test_channel_for_room_no_channel(self):
self._test_channel_for_room_no_channel()
def test_channel_for_room_with_channel_without_room(self):
self._test_channel_for_room_with_channel_without_room()
def test_channel_for_room_with_channel_with_room(self):
self._test_channel_for_room_with_channel_with_room()
def test_leave_room_not_joined(self):
self._test_leave_room_not_joined()
def test_leave_room_joined(self):
self._test_leave_room_joined()
def test_set_moderator_no_room(self):
self._test_set_moderator_no_room()
def test_set_moderator_with_room(self):
self._test_set_moderator_with_room()
def test_set_room_owner_no_room(self):
self._test_set_room_owner_no_room()
def test_set_room_owner_with_room(self):
self._test_set_room_owner_with_room()
def test_set_channel_owner_no_channel(self):
self._test_set_channel_owner_no_channel()
def test_set_channel_owner_with_channel(self):
self._test_set_channel_owner_with_channel()
def test_get_user_status_before_set(self):
self._test_get_user_status_before_set(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_offline(self):
self._test_set_user_offline(UserKeys.STATUS_UNAVAILABLE)
def test_set_user_online(self):
self._test_set_user_online(UserKeys.STATUS_AVAILABLE)
def test_set_user_invisible(self):
self._test_set_user_invisible(UserKeys.STATUS_INVISIBLE)
def test_remove_current_rooms_for_user_before_joining(self):
self._test_remove_current_rooms_for_user_before_joining()
def test_remove_current_rooms_for_user_after_joining(self):
self._test_remove_current_rooms_for_user_after_joining()
def test_rooms_for_user_before_joining(self):
self._test_rooms_for_user_before_joining()
def test_create_existing_room_name(self):
self._test_create_existing_room_name()
def test_rooms_for_user_after_joining(self):
|
def test_rooms_for_channel_before_create_channel(self):
self._test_rooms_for_channel_before_create_channel()
def test_rooms_for_channel_after_create_channel_before_create_room(self):
self._test_rooms_for_channel_after_create_channel_before_create_room()
def test_rooms_for_channel_after_create_channel_after_create_room(self):
self._test_rooms_for_channel_after_create_channel_after_create_room()
def test_get_channels_before_create(self):
self._test_get_channels_before_create()
def test_get_channels_after_create(self):
self._test_get_channels_after_create()
def test_room_exists(self):
self._test_room_exists()
def test_create_room_no_channel(self):
self._test_create_room_no_channel()
def test_create_existing_channel(self):
self._test_create_existing_channel()
def test_create_channel(self):
self._test_create_channel()
def test_create_channel_again_to_make_sure_tables_cleared_after_each_test(self):
self._test_create_channel()
channels = self.db._session().query(Channels).filter(Channels.uuid == BaseDatabaseTest.CHANNEL_ID).all()
self.assertEqual(1, len(channels))
def test_create_channel_blank_name(self):
self._test_create_channel_blank_name()
def test_create_channel_exists(self):
self._test_create_channel_exists()
def test_create_room(self):
self._test_create_room()
rooms = self.db._session().query(Rooms).filter(Rooms.uuid == BaseDatabaseTest.ROOM_ID).all()
self.assertEqual(1, len(rooms))
def test_create_room_blank_name(self):
self._test_create_room_blank_name()
def test_create_existing_room(self):
self._test_create_existing_room()
def test_channel_exists_after_create(self):
self._test_channel_exists_after_create()
def test_channel_exists_before_create(self):
self._test_channel_exists_before_create()
def test_room_name_exists_before_create(self):
self._test_room_name_exists_before_create()
def test_room_name_exists_after_create(self):
self._test_room_name_exists_after_create()
def test_delete_one_non_existing_acl(self):
self._test_delete_one_non_existing_acl()
def test_add_one_extra_acl(self):
self._test_add_one_extra_acl()
def test_get_acl(self):
self._test_get_acl()
def test_set_acl(self):
self._test_set_acl()
def test_delete_one_acl(self):
self._test_delete_one_acl()
def test_set_room_allows_cross_group_messaging(self):
self._test_set_room_allows_cross_group_messaging()
def test_get_room_allows_cross_group_messaging_no_room(self):
self._test_get_room_allows_cross_group_messaging_no_room()
def test_get_room_allows_cross_group_messaging(self):
self._test_get_room_allows_cross_group_messaging()
def test_get_room_does_not_allow_cross_group_messaging(self):
self._test_get_room_does_not_allow_cross_group_messaging()
def test_room_allows_cross_group_messaging_no_room(self):
self._test_room_allows_cross_group_messaging_no_room()
def test_room_allows_cross_group_messaging(self):
self._test_room_allows_cross_group_messaging()
def test_room_does_not_allow_cross_group_messaging_no_room(self):
self._test_room_does_not_allow_cross_group_messaging_no_room()
def test_create_admin_room(self):
self._test_create_admin_room()
def test_is_super_user(self):
self._test_is_super_user()
def test_get_admin_room(self):
self._test_get_admin_room()
def test_set_owner_and_moderator(self):
self._test_set_owner_and_moderator()
def test_remove_channel_role(self):
self._test_remove_channel_role()
def test_remove_room_role(self):
self._test_remove_room_role()
def test_remove_super_user(self):
self._test_remove_super_user()
def test_get_super_users(self):
self._test_get_super_users()
def test_remove_owner(self):
self._test_remove_owner()
def test_remove_channel_owner(self):
self._test_remove_channel_owner()
def test_remove_admin(self):
self._test_remove_admin()
def test_remove_moderator(self):
self._test_remove_moderator()
def test_set_owner_is_unique(self):
self._test_set_owner_is_unique()
def test_set_owner_channel_is_unique(self):
self._test_set_owner_channel_is_unique()
def test_set_moderator_is_unique(self):
self._test_set_moderator_is_unique()
def test_set_admin_is_unique(self):
self._test_set_admin_is_unique()
def test_set_super_user_is_unique(self):
self._test_set_super_user_is_unique()
def test_remove_super_user_without_setting(self):
self._test_remove_super_user_without_setting()
def test_remove_owner_without_setting(self):
self._test_remove_owner_without_setting()
def test_remove_channel_owner_without_setting(self):
self._test_remove_channel_owner_without_setting()
def test_remove_admin_without_setting(self):
self._test_remove_admin_without_setting()
def test_remove_moderator_without_setting(self):
self._test_remove_moderator_without_setting()
def test_remove_other_role_channel(self):
self._test_remove_other_role_channel()
def test_remove_other_role_room(self):
self._test_remove_other_role_room()
def test_set_admin_no_such_channel(self):
self._test_set_admin_no_such_channel()
def test_remove_admin_no_such_channel(self):
self._test_remove_admin_no_such_room()
def test_remove_moderator_no_such_room(self):
self._test_remove_moderator_no_such_room()
def test_channel_name_exists(self):
self._test_channel_name_exists()
def test_channel_exists(self):
self._test_channel_exists()
def test_create_user(self):
self._test_create_user()
def test_users_in_room(self):
self._test_users_in_room()
def test_delete_acl_in_channel_for_action(self):
self._test_delete_acl_in_channel_for_action()
def test_delete_acl_in_room_for_action(self):
self._test_delete_acl_in_room_for_action()
def test_remove_owner_channel_no_channel(self):
self._test_remove_owner_channel_no_channel()
def test_remove_owner_channel_not_owner(self):
self._test_remove_owner_channel_not_owner()
def test_remove_owner_channel_is_owner(self):
self._test_remove_owner_channel_is_owner()
def test_create_user_exists(self):
self._test_create_user_exists()
def test_update_acl_in_room_for_action(self):
self._test_update_acl_in_room_for_action()
def test_update_acl_in_room_for_action_no_channel(self):
self._test_update_acl_in_room_for_action_no_channel()
def test_update_acl_in_room_for_action_no_room(self):
self._test_update_acl_in_room_for_action_no_room()
def test_update_acl_in_room_for_action_invalid_action(self):
self._test_update_acl_in_room_for_action_invalid_action()
def test_update_acl_in_room_for_action_invalid_type(self):
self._test_update_acl_in_room_for_action_invalid_type()
def test_update_acl_in_room_for_action_invalid_value(self):
self._test_update_acl_in_room_for_action_invalid_value()
def test_update_acl_in_channel_for_action(self):
self._test_update_acl_in_channel_for_action()
def test_update_acl_in_channel_for_action_no_channel(self):
self._test_update_acl_in_channel_for_action_no_channel()
def test_update_acl_in_channel_for_action_invalid_action(self):
self._test_update_acl_in_channel_for_action_invalid_action()
def test_update_acl_in_channel_for_action_invalid_type(self):
self._test_update_acl_in_channel_for_action_invalid_type()
def test_update_acl_in_channel_for_action_invalid_value(self):
self._test_update_acl_in_channel_for_action_invalid_value()
def test_is_banned_from_channel(self):
self._test_is_banned_from_channel()
def test_is_banned_from_room(self):
self._test_is_banned_from_room()
def test_is_banned_globally(self):
self._test_is_banned_globally()
def test_remove_global_ban(self):
self._test_remove_global_ban()
def test_remove_channel_ban(self):
self._test_remove_channel_ban()
def test_remove_room_ban(self):
self._test_remove_room_ban()
def test_was_banned_globally(self):
self._test_was_banned_globally()
def test_was_banned_from_room(self):
self._test_was_banned_from_room()
def test_was_banned_from_channel(self):
self._test_was_banned_from_channel()
def test_get_user_ban_status_channel(self):
self._test_get_user_ban_status_channel()
def test_get_user_ban_status_room(self):
self._test_get_user_ban_status_room()
def test_get_user_ban_status_global(self):
self._test_get_user_ban_status_global()
def test_get_banned_users_global_not_empty_after_ban(self):
self._test_get_banned_users_global_not_empty_after_ban()
def test_get_banned_users_global_is_empty(self):
self._test_get_banned_users_global_is_empty()
def test_get_banned_users_global_is_empty_if_expired(self):
self._test_get_banned_users_global_is_empty_if_expired()
def test_get_banned_users_channel_not_empty_after_ban(self):
self._test_get_banned_users_channel_not_empty_after_ban()
def test_get_banned_users_channel_is_empty(self):
self._test_get_banned_users_channel_is_empty()
def test_get_banned_users_channel_is_empty_if_expired(self):
self._test_get_banned_users_channel_is_empty_if_expired()
def test_get_banned_users_room_not_empty_after_ban(self):
self._test_get_banned_users_room_not_empty_after_ban()
def test_get_banned_users_room_is_empty(self):
self._test_get_banned_users_room_is_empty()
def test_get_banned_users_room_is_empty_if_expired(self):
self._test_get_banned_users_room_is_empty_if_expired()
def test_get_banned_users_is_empty(self):
self._test_get_banned_users_is_empty()
def test_get_banned_users_for_room(self):
self._test_get_banned_users_for_room()
def test_get_banned_users_for_channel(self):
self._test_get_banned_users_for_channel()
def test_get_banned_users_globally(self):
self._test_get_banned_users_globally()
def test_get_global_ban_timestamp_is_none(self):
self._test_get_global_ban_timestamp_is_none()
def test_get_global_ban_timestamp_not_none(self):
self._test_get_global_ban_timestamp_not_none()
def test_get_global_ban_timestamp_empty_if_expired(self):
self._test_get_global_ban_timestamp_not_empty_if_expired()
def test_get_channel_ban_timestamp_is_none(self):
self._test_get_channel_ban_timestamp_is_none()
def test_get_channel_ban_timestamp_not_none(self):
self._test_get_channel_ban_timestamp_not_none()
def test_get_channel_ban_timestamp_empty_if_expired(self):
self._test_get_channel_ban_timestamp_not_empty_if_expired()
def test_get_room_ban_timestamp_is_none(self):
self._test_get_room_ban_timestamp_is_none()
def test_get_room_ban_timestamp_not_none(self):
self._test_get_room_ban_timestamp_not_none()
def test_get_room_ban_timestamp_empty_if_expired(self):
self._test_get_room_ban_timestamp_not_empty_if_expired()
def test_get_acls_in_channel_for_action_no_channel(self):
self._test_get_acls_in_channel_for_action_no_channel()
def test_get_acls_in_channel_for_action_no_room(self):
self._test_get_acls_in_channel_for_action_no_room()
def test_get_all_acls_channel_is_empty(self):
self._test_get_all_acls_channel_is_empty()
def test_get_all_acls_channel_not_empty(self):
self._test_get_all_acls_channel_not_empty()
def test_get_all_acls_room_is_empty(self):
self._test_get_all_acls_room_is_empty()
def test_get_all_acls_room_not_empty(self):
self._test_get_all_acls_room_not_empty()
def test_channel_for_room_blank_room_id(self):
self._test_channel_for_room_blank_room_id()
def test_channel_for_room_before_create(self):
self._test_channel_for_room_before_create()
def test_channel_for_room_after_create(self):
self._test_channel_for_room_after_create()
def test_channel_for_room_cache(self):
self._test_channel_for_room_cache()
def test_get_username_before_set(self):
self._test_get_username_before_set()
def test_get_username_after_set(self):
self._test_get_username_after_set()
def test_rename_channel(self):
self._test_rename_channel()
def test_rename_channel_before_create(self):
self._test_rename_channel_before_create()
def test_rename_channel_empty_name(self):
self._test_rename_channel_empty_name()
def test_rename_room(self):
self._test_rename_room()
def test_rename_room_before_create_channel(self):
self._test_rename_room_before_create_channel()
def test_rename_room_before_create_room(self):
self._test_rename_room_before_create_room()
def test_rename_room_empty_name(self):
self._test_rename_room_empty_name()
def test_rename_room_already_exists(self):
self._test_rename_room_already_exists()
def test_remove_room(self):
self._test_remove_room()
def test_remove_room_before_create_channel(self):
self._test_remove_room_before_create_channel()
def test_remove_room_before_create_room(self):
self._test_remove_room_before_create_room()
def test_admin_room_for_channel_before_exists(self):
self._test_admin_room_before_exists()
def test_admin_room_for_channel_get_from_cache(self):
self._test_admin_room_get_from_cache()
def test_room_exists_from_cache(self):
self._test_room_exists_from_cache()
def test_get_user_status_from_cache(self):
self._test_get_user_status_from_cache()
def test_get_user_status_after_set(self):
self._test_get_user_status_after_set()
def test_set_user_invisible_twice_ignores_second(self):
self._test_set_user_invisible_twice_ignores_second()
def test_set_user_offline_twice_ignores_second(self):
self._test_set_user_offline_twice_ignores_second()
def test_set_user_online_twice_ignores_second(self):
self._test_set_user_online_twice_ignores_second()
def test_users_in_room_after_join(self):
self._test_users_in_room_after_join()
def test_set_user_offline_after_online(self):
self._test_set_user_offline_after_online()
def test_room_contains_before_create_channel(self):
self._test_room_contains_before_create_channel()
def test_room_contains_before_create_room(self):
self._test_room_contains_before_create_room()
def test_room_contains_after_create(self):
self._test_room_contains_after_create()
def test_room_contains_after_join(self):
self._test_room_contains_after_join()
def test_room_name_exists_from_cache_after_create(self):
self._test_room_name_exists_from_cache_after_create()
def test_rename_channel_exists(self):
self._test_rename_channel_exists()
def test_channel_for_room_from_cache(self):
self._test_channel_for_room_from_cache()
def test_leave_room_before_create(self):
self._test_leave_room_before_create()
def test_remove_moderator_twice(self):
self._test_remove_moderator_twice()
def test_set_owner_channel_after_removing_owner(self):
self._test_set_owner_channel_after_removing_owner()
def test_delete_acl_in_channel_for_action_invalid_action(self):
self._test_delete_acl_in_channel_for_action_invalid_action()
def test_delete_acl_in_room_for_action_invalid_action(self):
self._test_delete_acl_in_room_for_action_invalid_action()
def test_delete_acl_in_channel_for_action_after_create(self):
self._test_delete_acl_in_channel_for_action_after_create()
def test_delete_acl_in_room_for_action_after_create(self):
self._test_delete_acl_in_room_for_action_after_create()
def test_update_acl(self):
self._test_update_acl()
def test_get_all_acls_channel(self):
self._test_get_all_acls_channel()
def test_get_all_acls_channel_before_create(self):
self._test_get_all_acls_channel_before_create()
def test_get_all_acls_room(self):
self._test_get_all_acls_room()
def test_get_all_acls_room_before_create(self):
self._test_get_all_acls_room_before_create()
def test_update_last_read_for(self):
self._test_update_last_read_for()
def test_update_username(self):
self._test_update_username()
def test_get_room_name_from_cache(self):
self._test_get_room_name_from_cache()
def test_get_channel_name_from_cache(self):
self._test_get_channel_name_from_cache()
def test_is_banned_globally_after_clearing_cache(self):
self._test_is_banned_globally_after_clearing_cache()
def test_is_banned_globally_after_clearing_cache_if_expired(self):
self._test_is_banned_globally_after_clearing_cache_if_expired()
def test_is_banned_from_room_after_clearing_cache(self):
self._test_is_banned_from_room_after_clearing_cache()
def test_is_banned_from_room_after_clearing_cache_if_expired(self):
self._test_is_banned_from_room_after_clearing_cache_if_expired()
def test_is_banned_from_channel_after_clearing_cache(self):
self._test_is_banned_from_channel_after_clearing_cache()
def test_is_banned_from_channel_after_clearing_cache_if_expired(self):
self._test_is_banned_from_channel_after_clearing_cache_if_expired()
| self._test_rooms_for_user_after_joining() |
models.go | // Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v20180813
import (
"encoding/json"
tcerr "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
)
type ApiKey struct {
// Secret ID
SecretId *string `json:"SecretId,omitempty" name:"SecretId"`
// Creation time (timestamp)
CreateTime *uint64 `json:"CreateTime,omitempty" name:"CreateTime"`
// Status (2: valid, 3: disabled, 4: deleted)
Status *uint64 `json:"Status,omitempty" name:"Status"`
}
type AssumeRoleRequest struct {
*tchttp.BaseRequest
// Resource description of the role, available in the [CAM console](https://console.cloud.tencent.com/cam/role) by clicking the role name.
// Regular roles:
// qcs::cam::uin/12345678:role/4611686018427397919、qcs::cam::uin/12345678:roleName/testRoleName
// Service roles:
// qcs::cam::uin/12345678:role/tencentcloudServiceRole/4611686018427397920、qcs::cam::uin/12345678:role/tencentcloudServiceRoleName/testServiceRoleName
RoleArn *string `json:"RoleArn,omitempty" name:"RoleArn"`
// Temporary session name, defined by the user.
// Length must be between 2 and 128; it may contain upper- and lower-case letters, digits and the special characters =,.@_-. Regex: [\w+=,.@_-]*
RoleSessionName *string `json:"RoleSessionName,omitempty" name:"RoleSessionName"`
// Validity period of the temporary credentials, in seconds; defaults to 7200 seconds, with a maximum of 43200 seconds
DurationSeconds *uint64 `json:"DurationSeconds,omitempty" name:"DurationSeconds"`
// Policy description
// Notes:
// 1. The policy must be urlencoded (if the cloud API is called via GET, all parameters must be urlencoded once more according to the [cloud API specification](https://cloud.tencent.com/document/api/598/33159#1.-.E6.8B.BC.E6.8E.A5.E8.A7.84.E8.8C.83.E8.AF.B7.E6.B1.82.E4.B8.B2) before the request is sent).
// 2. For the policy syntax, see the [CAM policy syntax](https://cloud.tencent.com/document/product/598/10603).
// 3. The policy must not contain a principal element.
Policy *string `json:"Policy,omitempty" name:"Policy"`
// External ID of the role, available in the [CAM console](https://console.cloud.tencent.com/cam/role) by clicking the role name.
// Length must be between 2 and 128; it may contain upper- and lower-case letters, digits and the special characters =,.@:/-. Regex: [\w+=,.@:\/-]*
ExternalId *string `json:"ExternalId,omitempty" name:"ExternalId"`
}
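// Illustrative usage sketch (not part of the generated SDK; the values below
// are hypothetical placeholders): building an AssumeRoleRequest by hand. As
// noted above, the Policy parameter must be URL-encoded, e.g. with
// url.QueryEscape from "net/url".
//
//	roleArn := "qcs::cam::uin/12345678:roleName/testRoleName"
//	sessionName := "demo-session"
//	policy := url.QueryEscape(`{"version":"2.0","statement":[]}`)
//	req := &AssumeRoleRequest{
//		RoleArn:         &roleArn,
//		RoleSessionName: &sessionName,
//		Policy:          &policy,
//	}
//	_ = req.ToJsonString()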
func (r *AssumeRoleRequest) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *AssumeRoleRequest) FromJsonString(s string) error {
f := make(map[string]interface{})
if err := json.Unmarshal([]byte(s), &f); err != nil {
return err
}
delete(f, "RoleArn")
delete(f, "RoleSessionName")
delete(f, "DurationSeconds")
delete(f, "Policy")
delete(f, "ExternalId")
if len(f) > 0 {
return tcerr.NewTencentCloudSDKError("ClientError.BuildRequestError", "AssumeRoleRequest has unknown keys!", "")
}
return json.Unmarshal([]byte(s), &r)
}
type AssumeRoleResponse struct {
*tchttp.BaseResponse
Response *struct {
// Temporary security credentials
Credentials *Credentials `json:"Credentials,omitempty" name:"Credentials"`
// Time at which the credentials become invalid, returned as a Unix timestamp, accurate to the second
ExpiredTime *int64 `json:"ExpiredTime,omitempty" name:"ExpiredTime"`
// Time at which the credentials become invalid, expressed as UTC time in ISO 8601 format
Expiration *string `json:"Expiration,omitempty" name:"Expiration"`
// Unique request ID, returned with every request. The RequestId of the request is needed when troubleshooting.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
func (r *AssumeRoleResponse) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *AssumeRoleResponse) FromJsonString(s string) error {
return json.Unmarshal([]byte(s), &r)
}
type AssumeRoleWithSAMLRequest struct {
*tchttp.BaseRequest
// Base64-encoded SAML assertion
SAMLAssertion *string `json:"SAMLAssertion,omitempty" name:"SAMLAssertion"`
// Access description name of the principal
PrincipalArn *string `json:"PrincipalArn,omitempty" name:"PrincipalArn"`
// Access description name of the role
RoleArn *string `json:"RoleArn,omitempty" name:"RoleArn"`
// Session name
RoleSessionName *string `json:"RoleSessionName,omitempty" name:"RoleSessionName"`
// Validity period of the temporary credentials, in seconds; defaults to 7200 seconds, with a maximum of 43200 seconds
DurationSeconds *uint64 `json:"DurationSeconds,omitempty" name:"DurationSeconds"`
}
func (r *AssumeRoleWithSAMLRequest) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *AssumeRoleWithSAMLRequest) FromJsonString(s string) error {
f := make(map[string]interface{})
if err := json.Unmarshal([]byte(s), &f); err != nil {
return err
}
delete(f, "SAMLAssertion")
delete(f, "PrincipalArn")
delete(f, "RoleArn")
delete(f, "RoleSessionName")
delete(f, "DurationSeconds")
if len(f) > 0 {
return tcerr.NewTencentCloudSDKError("ClientError.BuildRequestError", "AssumeRoleWithSAMLRequest has unknown keys!", "")
}
return json.Unmarshal([]byte(s), &r)
}
type AssumeRoleWithSAMLResponse struct {
*tchttp.BaseResponse
Response *struct {
// Object containing the Token, TmpSecretId and TmpSecretKey triple
Credentials *Credentials `json:"Credentials,omitempty" name:"Credentials"`
// Time at which the credentials become invalid, returned as a Unix timestamp, accurate to the second
ExpiredTime *uint64 `json:"ExpiredTime,omitempty" name:"ExpiredTime"`
// Time at which the credentials become invalid, expressed as UTC time in ISO 8601 format
Expiration *string `json:"Expiration,omitempty" name:"Expiration"`
// Unique request ID, returned with every request. The RequestId of the request is needed when troubleshooting.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
func (r *AssumeRoleWithSAMLResponse) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *AssumeRoleWithSAMLResponse) FromJsonString(s string) error {
return json.Unmarshal([]byte(s), &r)
}
type Credentials struct {
// Token. Its length depends on the bound policy and does not exceed 4096 bytes.
Token *string `json:"Token,omitempty" name:"Token"`
// Temporary credential secret ID. No longer than 1024 bytes.
TmpSecretId *string `json:"TmpSecretId,omitempty" name:"TmpSecretId"`
// Temporary credential secret key. No longer than 1024 bytes.
TmpSecretKey *string `json:"TmpSecretKey,omitempty" name:"TmpSecretKey"`
}
type GetCallerIdentityRequest struct {
*tchttp.BaseRequest
}
func (r *GetCallerIdentityRequest) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *GetCallerIdentityRequest) FromJsonString(s string) error {
f := make(map[string]interface{})
if err := json.Unmarshal([]byte(s), &f); err != nil {
return err
}
if len(f) > 0 {
return tcerr.NewTencentCloudSDKError("ClientError.BuildRequestError", "GetCallerIdentityRequest has unknown keys!", "")
}
return json.Unmarshal([]byte(s), &r)
}
type GetCallerIdentityResponse struct { | Arn *string `json:"Arn,omitempty" name:"Arn"`
// Root account Uin to which the current caller belongs.
AccountId *string `json:"AccountId,omitempty" name:"AccountId"`
// Identity.
// 1. If the caller is a cloud account, the current account Uin is returned
// 2. If the caller is a role, roleId:roleSessionName is returned
// 3. If the caller is a federated identity, uin:federatedUserName is returned
UserId *string `json:"UserId,omitempty" name:"UserId"`
// Account Uin that owns the key.
// 1. If the caller is a cloud account, the current account Uin is returned
// 2. If the caller is a role, the Uin of the account that requested the role key is returned
PrincipalId *string `json:"PrincipalId,omitempty" name:"PrincipalId"`
// Identity type.
Type *string `json:"Type,omitempty" name:"Type"`
// Unique request ID, returned with every request. The RequestId of the request is needed when troubleshooting.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
func (r *GetCallerIdentityResponse) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *GetCallerIdentityResponse) FromJsonString(s string) error {
return json.Unmarshal([]byte(s), &r)
}
type GetFederationTokenRequest struct {
*tchttp.BaseRequest
// Customizable caller name in English, consisting of letters.
Name *string `json:"Name,omitempty" name:"Name"`
// CAM policy that grants permissions to the temporary credentials
// Notes:
// 1. For the policy syntax, see the [CAM policy syntax](https://cloud.tencent.com/document/product/598/10603).
// 2. The policy must not contain a principal element.
// 3. This parameter must be urlencoded.
Policy *string `json:"Policy,omitempty" name:"Policy"`
// Validity period of the temporary credentials, in seconds; defaults to 1800 seconds. The maximum is 7200 seconds for a root account and 129600 seconds for a sub-account.
DurationSeconds *uint64 `json:"DurationSeconds,omitempty" name:"DurationSeconds"`
}
func (r *GetFederationTokenRequest) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *GetFederationTokenRequest) FromJsonString(s string) error {
f := make(map[string]interface{})
if err := json.Unmarshal([]byte(s), &f); err != nil {
return err
}
delete(f, "Name")
delete(f, "Policy")
delete(f, "DurationSeconds")
if len(f) > 0 {
return tcerr.NewTencentCloudSDKError("ClientError.BuildRequestError", "GetFederationTokenRequest has unknown keys!", "")
}
return json.Unmarshal([]byte(s), &r)
}
type GetFederationTokenResponse struct {
*tchttp.BaseResponse
Response *struct {
// Temporary credentials
Credentials *Credentials `json:"Credentials,omitempty" name:"Credentials"`
// Time at which the temporary credentials expire, returned as a Unix timestamp, accurate to the second
ExpiredTime *uint64 `json:"ExpiredTime,omitempty" name:"ExpiredTime"`
// Time at which the credentials expire, expressed as UTC time in ISO 8601 format
// Note: this field may return null, indicating that no valid value could be obtained.
Expiration *string `json:"Expiration,omitempty" name:"Expiration"`
// Unique request ID, returned with every request. The RequestId of the request is needed when troubleshooting.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
func (r *GetFederationTokenResponse) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *GetFederationTokenResponse) FromJsonString(s string) error {
return json.Unmarshal([]byte(s), &r)
}
type QueryApiKeyRequest struct {
*tchttp.BaseRequest
// Account to query (defaults to the current account if not specified)
TargetUin *uint64 `json:"TargetUin,omitempty" name:"TargetUin"`
}
func (r *QueryApiKeyRequest) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *QueryApiKeyRequest) FromJsonString(s string) error {
f := make(map[string]interface{})
if err := json.Unmarshal([]byte(s), &f); err != nil {
return err
}
delete(f, "TargetUin")
if len(f) > 0 {
return tcerr.NewTencentCloudSDKError("ClientError.BuildRequestError", "QueryApiKeyRequest has unknown keys!", "")
}
return json.Unmarshal([]byte(s), &r)
}
type QueryApiKeyResponse struct {
*tchttp.BaseResponse
Response *struct {
// List of key IDs
IdKeys []*ApiKey `json:"IdKeys,omitempty" name:"IdKeys"`
// Unique request ID, returned with every request. The RequestId of the request is needed when troubleshooting.
RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
} `json:"Response"`
}
func (r *QueryApiKeyResponse) ToJsonString() string {
b, _ := json.Marshal(r)
return string(b)
}
// FromJsonString It is highly **NOT** recommended to use this function
// because it has no param check, nor strict type check
func (r *QueryApiKeyResponse) FromJsonString(s string) error {
return json.Unmarshal([]byte(s), &r)
} | *tchttp.BaseResponse
Response *struct {
// ARN of the current caller.
job_manager_utils.py | #!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from distutils.spawn import find_executable
import os
from command_utils import ExecutableCommand
from command_utils_base import FormattedParameter, EnvironmentVariables
from env_modules import load_mpi
from write_host_file import write_host_file
class JobManager(ExecutableCommand):
"""A class for commands with parameters that manage other commands."""
def __init__(self, namespace, command, job, path="", subprocess=False):
"""Create a JobManager object.
Args:
namespace (str): yaml namespace (path to parameters)
command (str): string of the command to be executed.
job (ExecutableCommand): command object to manage.
path (str, optional): path to location of command binary file.
Defaults to "".
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
super(JobManager, self).__init__(namespace, command, path, subprocess)
self.job = job
def __str__(self):
"""Return the command with all of its defined parameters as a string.
Returns:
str: the command with all the defined parameters
"""
commands = [super(JobManager, self).__str__(), str(self.job)]
return " ".join(commands)
def check_subprocess_status(self, sub_process):
"""Verify command status when called in a subprocess.
Args:
sub_process (process.SubProcess): subprocess used to run the command
Returns:
bool: whether or not the command progress has been detected
"""
return self.job.check_subprocess_status(sub_process)
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the job manager command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
pass
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command.
Set the appropriate command line parameter with the specified value. | path (str, optional): path to use when specifying the hosts through
a hostfile. Defaults to None.
slots (int, optional): number of slots per host to specify in the
optional hostfile. Defaults to None.
"""
pass
def assign_processes(self, processes):
"""Assign the number of processes per node.
Set the appropriate command line parameter with the specified value.
Args:
processes (int): number of processes per node
"""
pass
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
pass
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
pass
class Orterun(JobManager):
"""A class for the orterun job manager command."""
def __init__(self, job, subprocess=False):
"""Create a Orterun object.
Args:
job (ExecutableCommand): command object to manage.
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
load_mpi("openmpi")
path = os.path.dirname(find_executable("orterun"))
super(Orterun, self).__init__(
"/run/orterun", "orterun", job, path, subprocess)
# Default mca values to avoid queue pair errors
mca_default = {
"btl_openib_warn_default_gid_prefix": "0",
"btl": "tcp,self",
"oob": "tcp",
"pml": "ob1",
}
self.hostfile = FormattedParameter("--hostfile {}", None)
self.processes = FormattedParameter("--np {}", 1)
self.display_map = FormattedParameter("--display-map", False)
self.map_by = FormattedParameter("--map-by {}", "node")
self.export = FormattedParameter("-x {}", None)
self.enable_recovery = FormattedParameter("--enable-recovery", True)
self.report_uri = FormattedParameter("--report-uri {}", None)
self.allow_run_as_root = FormattedParameter("--allow-run-as-root", None)
self.mca = FormattedParameter("--mca {}", mca_default)
self.pprnode = FormattedParameter("--map-by ppr:{}:node", None)
self.tag_output = FormattedParameter("--tag-output", True)
self.ompi_server = FormattedParameter("--ompi-server {}", None)
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the orterun command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
# Setup the env for the job to export with the orterun command
if self.export.value is None:
self.export.value = []
self.export.value.extend(env.get_list())
# Setup the orterun command
self.hostfile.value = hostfile
self.processes.value = processes
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command (--hostfile).
Args:
hosts (list): list of hosts to specify in the hostfile
path (str, optional): hostfile path. Defaults to None.
slots (int, optional): number of slots per host to specify in the
hostfile. Defaults to None.
"""
kwargs = {"hostlist": hosts, "slots": slots}
if path is not None:
kwargs["path"] = path
self.hostfile.value = write_host_file(**kwargs)
def assign_processes(self, processes):
"""Assign the number of processes per node (-np).
Args:
processes (int): number of processes per node
"""
self.processes.value = processes
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
if append and self.export.value is not None:
# Convert the current list of environmental variable assignments
# into an EnvironmentVariables (dict) object. Then update the
# dictionary keys with the specified values or add new key value
# pairs to the dictionary. Finally convert the updated dictionary
# back to a list for the parameter assignment.
original = EnvironmentVariables({
item.split("=")[0]: item.split("=")[1] if "=" in item else None
for item in self.export.value})
original.update(env_vars)
self.export.value = original.get_list()
else:
# Overwrite the environmental variable assignment
self.export.value = env_vars.get_list()
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
self.export.update_default(env_vars.get_list())
def run(self):
"""Run the orterun command.
Raises:
CommandFailure: if there is an error running the command
"""
load_mpi("openmpi")
return super(Orterun, self).run()
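# Illustrative usage sketch (not part of this module; the command, host names
# and slot/process counts below are hypothetical). It shows the assign_* API
# shared by the job manager classes defined here:
#
#   job = ExecutableCommand("/run/my_cmd", "my_cmd")
#   manager = Orterun(job)
#   manager.assign_hosts(["host-1", "host-2"], slots=4)
#   manager.assign_processes(8)
#   manager.assign_environment(EnvironmentVariables({"PATH": None}), append=True)
#   manager.run()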
class Mpirun(JobManager):
"""A class for the mpirun job manager command."""
def __init__(self, job, subprocess=False, mpitype="openmpi"):
"""Create a Mpirun object.
Args:
job (ExecutableCommand): command object to manage.
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
load_mpi(mpitype)
path = os.path.dirname(find_executable("mpirun"))
super(Mpirun, self).__init__(
"/run/mpirun", "mpirun", job, path, subprocess)
self.hostfile = FormattedParameter("-hostfile {}", None)
self.processes = FormattedParameter("-np {}", 1)
self.ppn = FormattedParameter("-ppn {}", None)
self.envlist = FormattedParameter("-envlist {}", None)
self.mpitype = mpitype
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the mpirun command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
# Setup the env for the job to export with the mpirun command
self._pre_command = env.get_export_str()
# Setup the orterun command
self.hostfile.value = hostfile
self.processes.value = processes
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command (-f).
Args:
hosts (list): list of hosts to specify in the hostfile
path (str, optional): hostfile path. Defaults to None.
slots (int, optional): number of slots per host to specify in the
hostfile. Defaults to None.
"""
kwargs = {"hostlist": hosts, "slots": slots}
if path is not None:
kwargs["path"] = path
self.hostfile.value = write_host_file(**kwargs)
def assign_processes(self, processes):
"""Assign the number of processes per node (-np).
Args:
processes (int): number of processes per node
"""
self.processes.value = processes
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
if append and self.envlist.value is not None:
# Convert the current list of environmental variable assignments
# into an EnvironmentVariables (dict) object. Then update the
# dictionary keys with the specified values or add new key value
# pairs to the dictionary. Finally convert the updated dictionary
# back to a string for the parameter assignment.
original = EnvironmentVariables({
item.split("=")[0]: item.split("=")[1] if "=" in item else None
for item in self.envlist.value.split(",")})
original.update(env_vars)
self.envlist.value = ",".join(original.get_list())
else:
# Overwrite the environmental variable assignment
self.envlist.value = ",".join(env_vars.get_list())
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
self.envlist.update_default(env_vars.get_list())
def run(self):
"""Run the mpirun command.
Raises:
CommandFailure: if there is an error running the command
"""
load_mpi(self.mpitype)
return super(Mpirun, self).run()
class Srun(JobManager):
"""A class for the srun job manager command."""
def __init__(self, job, path="", subprocess=False):
"""Create a Srun object.
Args:
job (ExecutableCommand): command object to manage.
path (str, optional): path to location of command binary file.
Defaults to "".
subprocess (bool, optional): whether the command is run as a
subprocess. Defaults to False.
"""
super(Srun, self).__init__("/run/srun", "srun", job, path, subprocess)
self.label = FormattedParameter("--label", False)
self.mpi = FormattedParameter("--mpi={}", None)
self.export = FormattedParameter("--export={}", None)
self.ntasks = FormattedParameter("--ntasks={}", None)
self.distribution = FormattedParameter("--distribution={}", None)
self.nodefile = FormattedParameter("--nodefile={}", None)
self.nodelist = FormattedParameter("--nodelist={}", None)
self.ntasks_per_node = FormattedParameter("--ntasks-per-node={}", None)
self.reservation = FormattedParameter("--reservation={}", None)
self.partition = FormattedParameter("--partition={}", None)
self.output = FormattedParameter("--output={}", None)
# deprecated: Use assign_[hosts|processes|environment]() methods instead
def setup_command(self, env, hostfile, processes):
"""Set up the srun command with common inputs.
Args:
env (EnvironmentVariables): the environment variables to use with
the launch command
hostfile (str): file defining host names and slots
processes (int): number of host processes
"""
# Setup the env for the job to export with the srun command
self.export.value = ",".join(["ALL"] + env.get_list())
# Setup the srun command
self.label.value = True
self.mpi.value = "pmi2"
if processes is not None:
self.ntasks.value = processes
self.distribution.value = "cyclic"
if hostfile is not None:
self.nodefile.value = hostfile
def assign_hosts(self, hosts, path=None, slots=None):
"""Assign the hosts to use with the command (-f).
Args:
hosts (list): list of hosts to specify in the hostfile
path (str, optional): hostfile path. Defaults to None.
slots (int, optional): number of slots per host to specify in the
hostfile. Defaults to None.
"""
kwargs = {"hostlist": hosts, "slots": None}
if path is not None:
kwargs["path"] = path
self.nodefile.value = write_host_file(**kwargs)
self.ntasks_per_node.value = slots
def assign_processes(self, processes):
"""Assign the number of processes per node (--ntasks).
Args:
processes (int): number of processes per node
"""
self.ntasks.value = processes
self.distribution.value = "cyclic"
def assign_environment(self, env_vars, append=False):
"""Assign or add environment variables to the command.
Args:
env_vars (EnvironmentVariables): the environment variables to use
assign or add to the command
append (bool): whether to assign (False) or append (True) the
specified environment variables
"""
if append and self.export.value is not None:
# Convert the current list of environmental variable assignments
# into an EnvironmentVariables (dict) object. Then update the
# dictionary keys with the specified values or add new key value
# pairs to the dictionary. Finally convert the updated dictionary
# back to a string for the parameter assignment.
original = EnvironmentVariables({
item.split("=")[0]: item.split("=")[1] if "=" in item else None
for item in self.export.value.split(",")})
original.update(env_vars)
self.export.value = ",".join(original.get_list())
else:
# Overwrite the environmental variable assignment
self.export.value = ",".join(env_vars.get_list())
def assign_environment_default(self, env_vars):
"""Assign the default environment variables for the command.
Args:
env_vars (EnvironmentVariables): the environment variables to
assign as the default
"""
self.export.update_default(env_vars.get_list()) |
Args:
hosts (list): list of hosts to specify on the command line |
main.go | package main
import (
"context"
"flag"
"log"
"net"
"github.com/wzshiming/commandproxy"
)
var network = "tcp"
func init() {
flag.StringVar(&network, "n", network, "network")
flag.Parse()
}
func main() | {
targets := flag.Args()
if len(targets) == 0 {
log.Fatalln("not target")
return
}
conn, err := net.Dial(network, targets[0])
if err != nil {
log.Fatalln(err)
return
}
var buf1, buf2 [32 * 1024]byte
err = commandproxy.Tunnel(context.Background(), commandproxy.Stdio, conn, buf1[:], buf2[:])
if err != nil {
log.Fatalln(err)
return
}
} |
|
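For orientation, a rough Python sketch (an assumption about behaviour, not the commandproxy API) of what the Go program above does: copy bytes in both directions between stdin/stdout and the dialed connection until one side closes.
import socket, sys, threading

def pump(read, write):
    # Copy up to 32 KiB at a time until the source side closes,
    # mirroring the buffer size used by the Go program above.
    while True:
        chunk = read(32 * 1024)
        if not chunk:
            break
        write(chunk)

def write_stdout(data):
    sys.stdout.buffer.write(data)
    sys.stdout.buffer.flush()

def tunnel(host, port):
    conn = socket.create_connection((host, port))
    # connection -> stdout in a background thread
    threading.Thread(target=pump, args=(conn.recv, write_stdout), daemon=True).start()
    # stdin -> connection in the foreground
    pump(sys.stdin.buffer.read1, conn.sendall)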
demo_predict_presynch_eT.py | import numpy
import scipy.stats
import itertools
import copy
import string
import os
from collections import Counter, defaultdict
from filter_data_methods import *
from igraph import *
from transCSSR import *
data_prefix = ''
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# The various test transducers. Xt is the input
# and Yt is the output.
#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Xt_name = 'coinflip'
# Yt_name = 'coinflip-excite_w_refrac'
Xt_name = 'barnettX'
Yt_name = 'barnettY'
# Xt_name = ''
# Yt_name = 'even'
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# | stringY = open('data/{}{}.dat'.format(data_prefix, Yt_name)).readline().strip()
if Xt_name == '':
stringX = '0'*len(stringY)
else:
stringX = open('data/{}{}.dat'.format(data_prefix, Xt_name)).readline().strip()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Set the parameters and associated quantities:
# axs, ays -- the input / output alphabets
# alpha -- the significance level associated with
# CSSR's hypothesis tests.
# L -- The maximum history length to look
# back when inferring predictive
# distributions.
#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if Xt_name == '':
axs = ['0']
ays = ['0', '1']
else:
axs = ['0', '1']
ays = ['0', '1']
e_symbols = list(itertools.product(axs, ays)) # All of the possible pairs of emission
# symbols for (x, y)
alpha = 0.001
verbose = False
# L is the maximum amount we want to ever look back.
L_max = 3
Tx = len(stringX); Ty = len(stringY)
assert Tx == Ty, 'The two time series must have the same length.'
T = Tx
word_lookup_marg, word_lookup_fut = estimate_predictive_distributions(stringX, stringY, L_max)
epsilon, invepsilon, morph_by_state = run_transCSSR(word_lookup_marg, word_lookup_fut, L_max, axs, ays, e_symbols, Xt_name, Yt_name, alpha = alpha)
ind_go_to = 20
possible_states_from_predict_presynch_eT = numpy.zeros((ind_go_to-1, len(invepsilon)), dtype = numpy.int32)
for cur_ind in range(1, ind_go_to):
curX = stringX[:cur_ind]
curY = stringY[:cur_ind-1]
preds, possible_states = predict_presynch_eT(curX, curY, machine_fname = 'transCSSR_results/+{}.dot'.format(Xt_name), transducer_fname = 'transCSSR_results/{}+{}.dot'.format(Xt_name, Yt_name), axs = axs, ays = ays, inf_alg = 'transCSSR')
possible_states_from_predict_presynch_eT[cur_ind - 1] = possible_states
print((cur_ind, curX, curY + '*', preds.tolist(), possible_states))
print('')
preds_all, possible_states_all = filter_and_pred_probs(stringX, stringY, machine_fname = 'transCSSR_results/+{}.dot'.format(Xt_name), transducer_fname = 'transCSSR_results/{}+{}.dot'.format(Xt_name, Yt_name), axs = axs, ays = ays, inf_alg = 'transCSSR')
for cur_ind in range(1, ind_go_to):
curX = stringX[:cur_ind]
curY = stringY[:cur_ind-1]
print((cur_ind, curX, curY + '*', preds_all[cur_ind-1, :].tolist(), possible_states_all[cur_ind-1, :].tolist()))
filtered_states, filtered_probs, stringY_pred = filter_and_predict(stringX, stringY, epsilon, invepsilon, morph_by_state, axs, ays, e_symbols, L_max, memoryless = False)
print_go_to = 40
print(("\n\nFirst {} predictions.".format(print_go_to)))
for ind in range(print_go_to):
print((filtered_probs[ind], preds_all[ind, 1]))
print(("\n\nLast {} predictions.".format(print_go_to)))
for ind in range(preds_all.shape[0] - print_go_to, preds_all.shape[0]):
print((filtered_probs[ind], preds_all[ind, 1]))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(filtered_probs[:, 1], label = 'Using filter_and_predict')
plt.plot(preds_all[:, 1], label = 'Using filter_and_pred_probs')
plt.xlim([0, 1000])
plt.legend()
plt.show() | # Load in the data for each process.
#
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
one_of_device_filters_inner_value_items.py | # coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-11-16
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OneOfDeviceFiltersInnerValueItems(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""OneOfDeviceFiltersInnerValueItems - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item, | result[attr] = value
if issubclass(OneOfDeviceFiltersInnerValueItems, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfDeviceFiltersInnerValueItems):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | value.items()
))
else: |
reduce2.py | from pygears.typing import Tuple, Uint, Union, Queue
from pygears.lib import fmap, demux, decouple, fifo, union_collapse
from pygears.lib import priority_mux, replicate
TCfg = Tuple[{'reduce_size': Uint['w_reduce_size'], 'init': 't_acc'}]
@gear
def reduce2(din, cfg: TCfg, *, f, max_size):
"""Similar to the Python reduce function, applies a rolling computation to
sequential pairs of values in a list. The ``din`` input is of type
:class:`Queue` which holds the values to be used for computation while the
``cfg`` input is a :class:`Tuple` consisting of a ``reduce_size`` field and
the ``init`` field holding the initial value.
Args:
f: Function to be performed
max_size: Maximal length of the input `Queue` which is the depth of the
FIFO used for storing intermediate values
Returns:
The result of the reduce operation
"""
acctype = cfg.dtype['init']
qtype = Queue[acctype, din.dtype.lvl - 1]
temp_res = Intf(dtype=qtype)
cfg_rep = cfg | replicate
sec_opnd = (cfg_rep, temp_res) \
| priority_mux \
| fmap(f=union_collapse, fcat=czip, lvl=1)
result = czip(din, sec_opnd) | decouple | fmap(f=f, fcat=czip, lvl=2)
acc, fin_res = result | Union[qtype, qtype] | demux
acc | fifo(intfs=[temp_res], depth=max_size)
return fin_res | from pygears import gear, Intf
from pygears.lib import czip |
|
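To make the dataflow above concrete, here is a plain-Python model (an assumption for illustration, not part of pygears) of the fold that reduce2 implements in hardware: accumulate f over one input burst, seeded with the init value carried in cfg.
def reduce2_model(values, init, f):
    # Software equivalent of the rolling computation: acc starts at `init`
    # and is folded with every element of the incoming burst.
    acc = init
    for v in values:
        acc = f(acc, v)
    return acc

print(reduce2_model([1, 2, 3, 4], init=0, f=lambda acc, x: acc + x))  # 10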
log_path_utils.py | import os
from ufs_tools.folder_tool import ensure_dir
def get_log_file_path(folder, log_file_name, ext=".log"):
| log_folder_relative_path = os.path.join('logs', folder)
log_filename = '%s%s' % (log_file_name, ext)
current_dir = os.path.join(os.getcwd())
folder_log_full_path = os.path.join(current_dir, log_folder_relative_path)
log_full_path = os.path.join(folder_log_full_path, log_filename)
ensure_dir(folder_log_full_path)
return log_full_path |
|
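A hypothetical call to the helper above (the folder and file names are made up): it ensures logs/daily exists under the current working directory and returns the absolute path of the log file.
log_path = get_log_file_path("daily", "worker")        # assumed example names
print(log_path)  # e.g. /current/working/dir/logs/daily/worker.log
trace_path = get_log_file_path("daily", "worker", ext=".trace")  # custom extension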
bind-new.js | define(['require', './PageView'], function(require, PageView) {
return PageView.extend({
defaults: {
toastTime: null
},
// If you want to do work when the page loads
route: function(options) {
PageView.prototype.route.apply(this, arguments);
},
postRoute: function() {
/* Hide the tab bar */
this.triggerEvent("barHide");
},
events: {
'click .goback': 'goBack',
'click .back-btn': 'comeBack',
'click .btnNext': 'nextStep',
'click .bind-new-code-btn': 'askForCode',
'input .phoneNum-input': 'btnStatus',
'input .code-input': 'btnStatus',
},
initialize: function(options) {
PageView.prototype.initialize.apply(this, arguments);
},
render: function() {
PageView.prototype.render.apply(this, arguments);
return this;
},
onResume: function() {
this.toggleBar && this.toggleBar('hide');
this.$('.bar') .show();
},
btnStatus: function() {
var code = this.$el.find('.code-input')
.val();
var phone = this.$el.find('.phoneNum-input')
.val();
if(code && phone) {
if($('.bind-new-code-btn').hasClass('identifyCode-btn-disabled')) {
this.$el.find('.submit-btn').removeAttr('disabled');
$('.submit-btn').removeClass('theme-backgorund-color-contrast')
.removeClass('theme-border-color-contrast');
$('.submit-btn').addClass('theme-backgorund-color').addClass('theme-border-color');
}
} else {}
},
checkPhone: function(phone) {
var pattern = /^1[0-9]{10}$/;
return pattern.test(phone);
},
askForCode: function() {
var _this = this;
// Validate the phone number format
var phoneNum = this.$el.find('.phoneNum-input')
.val();
if(!this.checkPhone(phoneNum.trim())) {
library.Toast('手机号格式错误');
} else {
var $btn = $('.bind-new-code-btn');
if($btn.hasClass( 'identifyCode-btn-disabled')) return;
$btn.html('已获取验证码(30)').addClass('identifyCode-btn-disabled').removeClass('theme-color');
var count = 30;
var ret = setInterval(
function() {
--count;
if(count == 0) {
$btn.html('获取验证码').removeClass('identifyCode-btn-disabled')
.addClass('theme-color');
clearInterval(ret);
} else {
$btn.html('已获取验证码(' + count + 's)');
}
}, 1000);
this.sendData(window.app.api +
'/getVerifyCode', {
userPhone: phoneNum.trim()
},
function(res) {
console.log(res);
if(res.status == "0") {
/*var $btn = $(
'.identifyCode-btn'
);
$btn.html('已获取验证码(60)')
.addClass(
'identifyCode-btn-disabled'
)
.removeClass(
'theme-color');
var count = 60;
var ret = setInterval(
function() {
--count;
if(count == 0) {
$btn.html(
'获取验证码'
)
.removeClass(
'identifyCode-btn-disabled'
)
.addClass(
'theme-color'
);
clearInterval
(ret);
} else {
$btn.html(
'已获取验证码(' +
count +
')'
);
}
}, 1000);*/
_this.sessionid = res.data.sessionId;
} else {
library.Toast(res.message);
}
}.bind(this),
function() {
library.Toast('网络错误');
});
}
},
sendData: function(url, data, callback, errorcallback) {
var BaseModel = require('../models/BaseModel');
var baseModel = new BaseModel();
var options = {
type: "POST",
url: url,
data: data,
datatype: "json", //"xml", "html", "script", "json", "jsonp", "text".
success: callback,
error: errorcallback
}
baseModel.loadData(options);
},
nextStep: function() {
var code = this.$el.find('.code-input') | .val();
var verifyId = this.params.verifyId;
if(!verifyId){
library.Toast('请验证原手机号码再进行绑定');
return ;
}
if(code && phone) {
var userinfo = JSON.parse( localStorage.getItem(
'userinfo' ) );
//The part above is used for testing
/* The following block is temporarily disabled because there is no corresponding API */
library.LoadingBar('验证中...');
var options = {
url: window.app.api +
'/bandUserPhone',
type: "POST",
needReset: true,
data: {
userid: userinfo.userid,
checkCode: code,
userPhone: phone,
sessionId: this.sessionid,
verifyId: verifyId
},
datatype: "json",
success: function(res) {
if(res.status == '0') {
$('.mobile-bind')
.hide();
$('.success-tips-view')
.show();
library.DismissLoadingBar();
} else {
library.DismissLoadingBar();
library.Toast(res.message);
}
},
error: function() {
library.DismissLoadingBar();
library.Toast('网络错误');
}
}
if(!this.baseModel) {
var BaseModel = require(
'../models/BaseModel');
this.baseModel = new BaseModel();
}
this.baseModel.loadData(options); // request the data
} else {
library.Toast('请输入验证码');
}
/* $.ajax({
// request type: POST or GET
type: "POST",
// request URL
url: window.app.api + '/validatepassword',
// data to submit
data:{userid:JSON.parse(localStorage.getItem('userinfo')).userid,password:passwd},
// format of the returned data
datatype: "json", //"xml", "html", "script", "json", "jsonp", "text".
// function called before the request is sent
beforeSend: function () {
console.log(this.data);
},
// function called on a successful response
success: function (res) {
if(res.status=='ok'){
library.DismissLoadingBar();
Backbone.history.navigate('#my-navigate/bind-new?encrypt='+res.data.encrypt+'&passSessionId='+res.data.passSessionId, {
trigger: true
});
}else{
library.DismissLoadingBar();
library.Toast(res.message);
}
},
// function called after the request completes
complete: function (XMLHttpRequest, textStatus) {
//HideLoading();
},
// function called when the request fails
error: function () {
library.DismissLoadingBar();
// handle the request error
}
});*/
//var num = 0;
//if(/[0-9]+/.test(username)) num++
//if(/[a-zA-Z]+/.test(username)) num = num + 2
//if(/[^0-9a-zA-Z\s\u4e00-\u9fa5]+/.test(username)) num = num + 4
//if(num>3){
// // not yet verified
// if(this.toastTime!=null){
// clearTimeout(this.toastTime);
// }
// $('.login-toast').addClass('login-toast-active');
// this.toastTime=setTimeout(function(){
// $('.login-toast').removeClass('login-toast-active');
// },1000);
//
//}
//else {
// Backbone.history.navigate('#my-navigate/bind-new', {
// trigger: true
// })
//}
},
comeBack: function(e) {
Backbone.history.navigate('#my-navigate/my', {
trigger: true
})
},
goBack: function() {
window.history.back();
},
});
}); | .val();
var phone = this.$el.find('.phoneNum-input') |
launcher.py | import os
import sys
import time
import importlib
from pkg_resources import iter_entry_points
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QApplication
import qtpyvcp
from qtpyvcp import hal
from qtpyvcp.utilities.logger import getLogger
from qtpyvcp.plugins import registerPluginFromClass, postGuiInitialisePlugins
from qtpyvcp.widgets.dialogs.error_dialog import ErrorDialog, IGNORE_LIST
from qtpyvcp.utilities.info import Info
LOG = getLogger(__name__)
INFO = Info()
# Catch unhandled exceptions and display in dialog
def excepthook(exc_type, exc_msg, exc_tb):
try:
filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
lineno = exc_tb.tb_lineno
except AttributeError:
# AttributeError: 'NoneType' object has no attribute 'tb_frame'
filename = 'unknown file'
lineno = -1
if len(IGNORE_LIST) > 0 and (str(exc_type), str(exc_msg), lineno) in IGNORE_LIST:
LOG.debug('Ignoring unhandled exception in %s line %i', filename, lineno,
exc_info=(exc_type, exc_msg, exc_tb))
return
LOG.critical('Unhandled exception in %s line %i', filename, lineno,
exc_info=(exc_type, exc_msg, exc_tb))
# if an exception occurs early on a qApp may not have been created yet,
# so create one so the dialog will be able to run without errors.
if QApplication.instance() is None:
app = QApplication([])
error_dialog = ErrorDialog(exc_info=(exc_type, exc_msg, exc_tb))
error_dialog.exec_()
sys.excepthook = excepthook
def log_time(task, times=[time.time(), time.time()]):
now = time.time()
LOG.debug("yellow<Time:> {:.3f} (green<{:+.3f}>) - {}"
.format(now - times[0], now - times[1], task))
times[1] = now
log_time("in script")
def launch_application(opts, config):
qtpyvcp.OPTIONS.update(opts)
qtpyvcp.CONFIG.update(config)
hal_comp = hal.component('qtpyvcp')
LOG.debug('Loading data plugins')
loadPlugins(config['data_plugins'])
log_time('done loading data plugins')
LOG.debug('Initializing app')
app = _initialize_object_from_dict(config['application'])
log_time('done initializing app')
LOG.debug('Loading dialogs')
loadDialogs(config['dialogs'])
log_time('done loading dialogs')
LOG.debug('Loading windows')
loadWindows(config['windows'])
log_time('done loading windows')
LOG.debug('Initializing widgets')
app.initialiseWidgets()
log_time('done initializing widgets')
hal_comp.ready()
# load any post GUI hal file
postgui_halfile = INFO.getPostguiHalfile()
if postgui_halfile != "":
if not os.path.exists(postgui_halfile):
raise IOError('The specified POSTGUI_HALFILE does not exist: %s' %
postgui_halfile)
ini_path = INFO.INI_FILE
LOG.info('Loading POSTGUI_HALFILE: %s', postgui_halfile)
res = os.spawnvp(os.P_WAIT, "halcmd", ["halcmd", "-i", ini_path, "-f", postgui_halfile])
if res:
raise SystemExit("Failed to load POSTGUI_HALFILE with error: %s" % res)
# suppress QtQuick warnings
app.setAttribute(Qt.AA_DontCreateNativeWidgetSiblings)
sys.exit(app.exec_())
def load_vcp(opts):
vcp = opts.vcp
if vcp is None:
return
vcp_path = os.path.realpath(os.path.join(os.getenv('OLDPWD', '~'), vcp))
if os.path.isfile(vcp_path):
LOG.debug("Attempting to load VCP from file: {}".format(vcp_path))
directory, filename = os.path.split(vcp_path)
name, ext = os.path.splitext(filename)
if ext.lower() in ['.yaml', '.yml']:
_load_vcp_from_yaml_file(vcp_path, opts)
return
elif ext.lower() == '.ui':
_load_vcp_from_ui_file(vcp_path, opts)
return
if _load_vcp_from_entry_point(vcp, opts):
return
LOG.error("Could not load {}, make sure that the name or "
"file path is correct.".format(vcp_path))
def _load_vcp_from_yaml_file(yaml_file, opts):
LOG.info("Loading VCP from YAML file: yellow<{}>".format(yaml_file))
from qtpyvcp.utilities.config_loader import load_config_files
cfg_files = [opts.config_file or '']
cfg_files.extend(os.getenv('VCP_CONFIG_FILES', '').split(':'))
cfg_files.append(yaml_file)
cfg_files.append(qtpyvcp.DEFAULT_CONFIG_FILE)
config = load_config_files(*cfg_files)
# add the YAML file dir to path so can import relative modules
sys.path.insert(0, os.path.dirname(os.path.dirname(yaml_file)))
launch_application(opts, config)
def | (ui_file, opts):
LOG.info("Loading VCP from UI file: yellow<{}>".format(ui_file))
from qtpyvcp.utilities.config_loader import load_config_files
cfg_files = [opts.config_file or '']
cfg_files.extend(os.getenv('VCP_CONFIG_FILES', '').split(':'))
cfg_files.append(qtpyvcp.DEFAULT_CONFIG_FILE)
config = load_config_files(*cfg_files)
kwargs = config['windows']['mainwindow'].get('kwargs', {})
kwargs.update({'ui_file': ui_file})
config['windows']['mainwindow']['kwargs'] = kwargs
launch_application(opts, config)
def _load_vcp_from_entry_point(vcp_name, opts):
entry_points = {}
for entry_point in iter_entry_points(group='qtpyvcp.example_vcp'):
entry_points[entry_point.name] = entry_point
for entry_point in iter_entry_points(group='qtpyvcp.test_vcp'):
entry_points[entry_point.name] = entry_point
for entry_point in iter_entry_points(group='qtpyvcp.vcp'):
entry_points[entry_point.name] = entry_point
try:
vcp = entry_points[vcp_name.lower()].load()
except KeyError:
LOG.exception("Failed to find entry point: {}".format(vcp_name))
except Exception as e:
LOG.debug(e)
LOG.exception("Failed to load entry point: {}".format(vcp_name))
else:
LOG.info("Loading VCP from entry point: {}".format(vcp_name))
vcp.main(opts)
return True
def _get_object_by_referance(object_ref):
modname, sep, attrname = object_ref.partition(':')
try:
return getattr(importlib.import_module(modname), attrname)
except Exception:
LOG.critical("Failed to get object by reference: {}".format(object_ref))
raise
def _initialize_object_from_dict(object_dict, parent=None):
"""Initialize a python object from dict."""
provider = object_dict['provider']
args = object_dict.get('args') or []
kwargs = object_dict.get('kwargs') or {}
obj = _get_object_by_referance(provider)
if parent is not None:
kwargs.update({'parent': parent})
return obj(*args, **kwargs)
def loadPlugins(plugins):
for plugin_id, plugin_dict in list(plugins.items()):
try:
cls = plugin_dict['provider']
except KeyError:
raise ValueError("No provider class specified for %s plugin" % plugin_id)
args = plugin_dict.get('args', [])
kwargs = plugin_dict.get('kwargs', {})
registerPluginFromClass(plugin_id=plugin_id, plugin_cls=cls, args=args, kwargs=kwargs)
def loadWindows(windows):
for window_id, window_dict in list(windows.items()):
window = _initialize_object_from_dict(window_dict)
qtpyvcp.WINDOWS[window_id] = window
if window_id == 'mainwindow':
postGuiInitialisePlugins(window)
# show the window by default
if window_dict.get('show', True):
window.show()
def loadDialogs(dialogs):
for dialogs_id, dialogs_dict in list(dialogs.items()):
inst = _initialize_object_from_dict(dialogs_dict)
qtpyvcp.DIALOGS[dialogs_id] = inst
| _load_vcp_from_ui_file |
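For reference, a sketch of the dictionary shape that _initialize_object_from_dict and loadWindows above expect. The provider path, class name, and kwargs here are assumptions for illustration, not taken from any real config.
window_dict = {
    # "module:attribute" reference resolved by _get_object_by_referance
    "provider": "my_vcp.mainwindow:MyMainWindow",   # hypothetical class
    "args": [],
    "kwargs": {"ui_file": "my_vcp.ui"},             # hypothetical UI file
}
# _initialize_object_from_dict(window_dict) would import my_vcp.mainwindow,
# look up MyMainWindow, and call it as MyMainWindow(ui_file="my_vcp.ui").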
social-connect-button-test.js | import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { render, find, click, findAll } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
module('Integration | Component | nypr accounts/social connect button', function(hooks) {
setupRenderingTest(hooks);
test('it renders', async function(assert) {
await render(hbs`{{nypr-accounts/social-connect-button}}`);
assert.equal(findAll('.nypr-social-connect').length, 1);
});
test('it shows a manage link when connected', async function(assert) {
let providerName='LoginZone';
let manageUrl='http://example.com';
this.set('providerName', providerName);
this.set('manageUrl', manageUrl);
await render(hbs`{{nypr-accounts/social-connect-button
connected=true
providerName=providerName
manageUrl=manageUrl
}}`);
assert.equal(findAll('button').length, 0);
assert.equal(findAll('a').length, 1);
assert.equal(find('a').textContent.trim(), `Manage ${providerName} connection`);
assert.equal(find('a').getAttribute('href'), manageUrl);
});
test('it shows a button when not connected', async function(assert) {
let providerName='LoginZone';
let buttonClass='loginzone-orange';
this.set('providerName', providerName);
this.set('buttonClass', buttonClass);
await render(hbs`{{nypr-accounts/social-connect-button
connected=false
providerName=providerName
buttonClass=buttonClass
}}`);
assert.equal(findAll('a').length, 0);
assert.equal(findAll('button').length, 1);
assert.equal(find('button').textContent.trim(), `Connect with ${providerName}`);
assert.ok(find('button').classList.contains(buttonClass));
});
| connected=false
providerIcon=providerIcon
}}`);
assert.equal(findAll('button > i').length, 1);
assert.ok(find('button > i').classList.contains(`fa-${providerIcon}`));
});
test('button triggers action', async function(assert) {
let timesButtonWasPressed = 0;
let buttonAction = () => timesButtonWasPressed++;
this.set('buttonAction', buttonAction);
await render(hbs`{{nypr-accounts/social-connect-button
connected=false
buttonAction=buttonAction
}}`);
await click('button');
assert.equal(timesButtonWasPressed, 1);
});
}); | test('button shows font awesome icon', async function(assert) {
let providerIcon='loginzone';
this.set('providerIcon', providerIcon);
await render(hbs`{{nypr-accounts/social-connect-button |
simple_gradient.py | import math
from typing import List
import numpy
from allennlp.common.util import JsonDict, sanitize
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("simple-gradient")
class SimpleGradient(SaliencyInterpreter):
"""
Registered as a `SaliencyInterpreter` with name "simple-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
Interprets the model's prediction for inputs. Gets the gradients of the loss with respect
to the input and returns those gradients normalized and sanitized.
"""
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
| # List of embedding inputs, used for multiplying gradient by the input for normalization
embeddings_list: List[numpy.ndarray] = []
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Hook used for saving embeddings
handle = self._register_forward_hook(embeddings_list)
grads = self.predictor.get_gradients([instance])[0]
handle.remove()
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
for key, grad in grads.items():
# Get number at the end of every gradient key (they look like grad_input_[int],
# we're getting this [int] part and subtracting 1 for zero-based indexing).
# This is then used as an index into the reversed input array to match up the
# gradient and its respective embedding.
input_idx = int(key[-1]) - 1
# The [0] here is undo-ing the batching that happens in get_gradients.
emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx], axis=1)
norm = numpy.linalg.norm(emb_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in emb_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_forward_hook(self, embeddings_list: List):
"""
Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()
is called, embeddings_list is filled with the embedding values. This is necessary because
our normalization scheme multiplies the gradient by the embedding value.
"""
def forward_hook(module, inputs, output):
embeddings_list.append(output.squeeze(0).clone().detach().numpy())
embedding_layer = util.find_embedding_layer(self.predictor._model)
handle = embedding_layer.register_forward_hook(forward_hook)
return handle | |
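A standalone numpy sketch of the normalization performed in saliency_interpret_from_json above: a per-token dot product of gradient and embedding, made positive and rescaled by the L1 norm so the saliency scores sum to 1. The toy shapes and values are assumptions.
import math
import numpy

grad = numpy.array([[0.2, -0.1], [0.4, 0.3], [-0.5, 0.1]])  # (tokens, embed_dim)
emb = numpy.array([[1.0, 2.0], [0.5, -1.0], [2.0, 0.0]])    # matching embeddings
emb_grad = numpy.sum(grad * emb, axis=1)                     # dot product per token
norm = numpy.linalg.norm(emb_grad, ord=1)
saliency = [math.fabs(e) / norm for e in emb_grad]
print(saliency, sum(saliency))                               # scores sum to 1.0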
err.rs | /// Convenience `Err` creation function.
pub fn | <S: Into<String>, O>(msg: S) -> anyhow::Result<O> {
Err(anyhow::anyhow!("{}", msg.into()))
}
| err |
setup.py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import auth_rename
setup(
name='ahlev-django-auth-rename',
version=auth_rename.__version__,
description='to rename authentication and authorization',
long_description='to rename authentication and authorization',
long_description_content_type='text/x-rst',
author='ahlev',
author_email='[email protected]',
include_package_data=True,
url='https://github.com/ohahlev/ahlev-django-auth-rename/tree/%s' % auth_rename.__version__, | 'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
zip_safe=False,
)
# Usage of setup.py:
# $> python setup.py register # registering package on PYPI
# $> python setup.py build sdist upload # build, make source dist and upload to PYPI | packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta', |