text
stringlengths 2
100k
| meta
dict |
---|---|
/*
 * This file is used to test pull down of count(distinct) expression
 */
-- Recreate the working schema from scratch so reruns are deterministic.
drop schema if exists distribute_count_distinct_part2 cascade;
create schema distribute_count_distinct_part2;
set current_schema = distribute_count_distinct_part2;
-- prepare a temp table for import data
create table tmp_t1(c1 int);
insert into tmp_t1 values (1);
-- Create Table and Insert Data
-- 10000 rows; moduli give: a ~5001 distinct, b 750, c 246, d 7 distinct values.
create table t_distinct(a int, b int, c int, d int, e regproc);
insert into t_distinct select generate_series(1, 10000)%5001, generate_series(1, 10000)%750, generate_series(1, 10000)%246, generate_series(1, 10000)%7, 'sin' from tmp_t1;
analyze t_distinct;
-- Case 2.3 sub level
-- Case 2.3.1 count_distinct within target list
explain (costs off, nodes off) select distinct da from (select count(distinct(a)) da, max(b+c), avg(d) from t_distinct);
select distinct da from (select min(distinct(a)) da, max(b+c), avg(d) from t_distinct);
explain (costs off, nodes off) select distinct db from (select count(distinct(b)) db, max(a-c), avg(d) from t_distinct);
select distinct db from (select max(distinct(b)) db, max(a-c), avg(d) from t_distinct);
explain (costs off, nodes off) select distinct db from (select count(distinct(b+c)) db, max(a-c), avg(d) from t_distinct);
select distinct db from (select count(distinct(b+c)) db, max(a-c), avg(d) from t_distinct);
-- Case 2.3.3 count_distinct within other place
explain (costs off, nodes off) select distinct mb from (select max(b) mb from t_distinct order by count(distinct(d)));
select distinct mb from (select max(b) mb from t_distinct order by count(distinct(d)));
explain (costs off, nodes off) select distinct mb from (select max(b) mb from t_distinct having count(distinct(d))=7);
select distinct mb from (select max(b) mb from t_distinct having count(distinct(d))=7);
explain (costs off, nodes off) select distinct aabcd from (select avg(a+b+c+d) aabcd from t_distinct having count(distinct(a))=max(a)+1);
select distinct aabcd from (select avg(a+b+c+d) aabcd from t_distinct having count(distinct(a))=max(a)+1);
explain (costs off, nodes off) select distinct ac from (select avg(c) ac from t_distinct order by count(distinct(a)));
select distinct ac from (select avg(c::numeric(15,10)) ac from t_distinct order by count(distinct(a)));
explain (costs off, nodes off) select distinct ac from (select avg(c) ac from t_distinct order by count(distinct(d)));
select distinct ac from (select avg(c::numeric(15,10)) ac from t_distinct order by count(distinct(d)));
-- Case 2.4 with distinct
explain (costs off, nodes off) select distinct count(distinct(b)) from t_distinct;
select distinct count(distinct(b)) from t_distinct;
explain (costs off, nodes off) select distinct count(distinct(a)) from t_distinct;
select distinct count(distinct(a)) from t_distinct;
-- Case 2.5 non-projection-capable-subplan
explain (costs off, nodes off) select count(distinct(b+c)) from (select b, c, d from t_distinct union all select b, c, d from t_distinct);
select count(distinct(b+c)) from (select b, c, d from t_distinct union all select b, c, d from t_distinct);
-- Case 3 groupagg optimization
-- Force sorted GroupAggregate paths so distinct aggregates are planned
-- without hash aggregation.
set enable_hashagg=off;
explain (costs off, nodes off) select avg(distinct(a)) from t_distinct;
select avg(distinct(a)) from t_distinct;
explain (costs off, nodes off) select avg(distinct(b::numeric(5,1)))+5, d from t_distinct group by d order by 2;
select avg(distinct(b::numeric(5,1)))+5, d from t_distinct group by d order by 2;
explain (costs off, nodes off) select avg(distinct(c))+count(distinct(c)), b from t_distinct group by b order by 2;
select avg(distinct(c))+count(distinct(c)), b from t_distinct group by b order by 2 limit 10;
explain (costs off, nodes off) select c from t_distinct group by c having avg(distinct(c))>50 order by 1;
select c from t_distinct group by c having avg(distinct(c))>50 order by 1 limit 10;
explain (costs off, nodes off) select b, c from t_distinct group by b, c order by b, count(distinct(c))-c;
select b, c from t_distinct group by b, c order by b, count(distinct(c))-c limit 10;
explain (costs off, nodes off) select count(distinct(c)), d from t_distinct group by d having avg(distinct(c)) <> 0 order by 2;
select count(distinct(c)), d from t_distinct group by d having avg(distinct(c)) <> 0 order by 2;
reset enable_hashagg;
-- Case 4 two_level_hashagg
explain (costs off, nodes off) select count(distinct(b)), count(c), d from t_distinct group by d order by d;
select count(distinct(b)), count(c), d from t_distinct group by d order by d;
explain (costs off, nodes off) select avg(distinct(b)), d, count(c) from t_distinct group by d order by d;
select avg(distinct(b)), d, count(c) from t_distinct group by d order by d;
explain (costs off, nodes off) select count(c), count(distinct(a)), d from t_distinct group by d order by d;
select count(c), count(distinct(a)), d from t_distinct group by d order by d;
explain (costs off, nodes off) select count(c), d, count(distinct(d)) from t_distinct group by d order by 1;
select count(c), d, count(distinct(d)) from t_distinct group by d order by 1, 2;
explain (costs off, nodes off) select count(c), d, count(distinct(b)) from t_distinct group by d order by 3;
select count(c), d, count(distinct(b)) from t_distinct group by d order by 3, 2;
explain (costs off, nodes off) select count(d), count(distinct(d)), a%2, b%2, c%2 from t_distinct group by 3,4,5 order by 3,4,5;
select count(d), count(distinct(d)), a%2, b%2, c%2 from t_distinct group by 3,4,5 order by 3,4,5;
explain (costs off, nodes off) select count(c), d from t_distinct group by d having count(distinct(d))=1 order by d;
select count(c), d from t_distinct group by d having count(distinct(d))=1 order by d;
explain (costs off, nodes off) select count(c), count(distinct(b)) from t_distinct group by d having count(d)=1428 order by d;
select count(c), count(distinct(b)) from t_distinct group by d having count(d)=1428 order by d;
explain (costs off, nodes off) select count(distinct(c)), d from t_distinct group by d order by count(c), d;
select count(distinct(c)), d from t_distinct group by d order by count(c), d;
explain (costs off, nodes off) select count(distinct(c)), d from t_distinct where c <= any (select count(distinct(b)) from t_distinct group by c limit 5) group by d order by d;
select count(distinct(c)), d from t_distinct where c <= any (select count(distinct(b)) from t_distinct group by c limit 5) group by d order by d;
-- Case 5 multi-level count(distinct)
-- Case 5.1 normal case
explain (costs off, nodes off) select count(distinct(a)), count(distinct(b)) from t_distinct;
select count(distinct(a)), count(distinct(b)) from t_distinct;
explain (costs off, nodes off) select count(distinct(a)) from t_distinct having count(distinct(a))>5000;
select count(distinct(a)) from t_distinct having count(distinct(a))>5000;
explain (costs off, nodes off) select count(distinct(b)) from t_distinct order by count(distinct(d));
select count(distinct(b)) from t_distinct order by count(distinct(d));
explain (costs off, nodes off) select count(distinct(a)) col1, max(b) col2, count(distinct(b)) col3, c, count(distinct(c)) col4, d from t_distinct group by d,c;
select count(distinct(a)) col1, max(b) col2, count(distinct(b)) col3, c, count(distinct(c)) col4, d from t_distinct group by d,c order by d,c limit 10;
explain (costs off, nodes off) select count(distinct(a)) col1, max(b) col2, count(distinct(b)) col3, min(c) col4, count(distinct(c)) guo, avg(a) qiang from t_distinct;
select count(distinct(a)) col1, max(b) col2, count(distinct(b)) col3, min(c) col4, count(distinct(c)) guo, avg(a) qiang from t_distinct;
explain (costs off, nodes off) select count(distinct(a))+avg(b) col2, count(c) col3, d from t_distinct group by d having count(distinct(c))>5;
select count(distinct(a))+avg(b) col2, count(c) col3, d from t_distinct group by d having count(distinct(c))>5 order by d;
explain (costs off, nodes off) select count(distinct(a)) col1, avg(b) col2, count(c) col3, d from t_distinct group by d order by d, avg(distinct(c));
select count(distinct(a)) col1, avg(b) col2, count(c) col3, d from t_distinct group by d order by d, avg(distinct(c));
explain (costs off, nodes off) select count(distinct(a)) col1, d, avg(b) col2, sum(distinct(a)) col3, avg(distinct(c)) col4 from t_distinct group by d order by d, avg(distinct(c));
select count(distinct(a)) col1, d, avg(b) col2, sum(distinct(a)) col3, avg(distinct(c)) col4 from t_distinct group by d order by d, avg(distinct(c));
explain (costs off, nodes off) select distinct case when min(distinct c)>60 then min(distinct c) else null end as min, count(distinct(b)) from t_distinct group by b;
select distinct case when min(distinct c)>60 then min(distinct c) else null end as min, count(distinct(b)) from t_distinct group by b order by 1 nulls first limit 5;
explain (costs off, nodes off) select count(distinct(a)) col1, d, avg(b) col2, sum(distinct(a)) col3, avg(distinct(c)) col4 from t_distinct group by d having col1=1428 or d+col4>125 order by d, avg(distinct(c));
select count(distinct(a)) col1, d, avg(b) col2, sum(distinct(a)) col3, avg(distinct(c)) col4 from t_distinct group by d having col1=1428 or d+col4>125 order by d, avg(distinct(c));
-- Cleanup: restore search path and drop everything created by this test.
reset current_schema;
drop schema if exists distribute_count_distinct_part2 cascade;
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2014, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
*/
#import "BFTask.h"
#import <libkern/OSAtomic.h>
#import "Bolts.h"
// Logs a warning when a blocking operation runs on the main thread.
// Marked noinline so the symbol survives optimization and can serve as a
// breakpoint target for debugging (as the message itself suggests).
__attribute__ ((noinline)) void warnBlockingOperationOnMainThread() {
    NSLog(@"Warning: A long-running operation is being executed on the main thread. \n"
" Break on warnBlockingOperationOnMainThread() to debug.");
}

// NSError domain for errors produced by BFTask.
NSString *const BFTaskErrorDomain = @"bolts";
// Error code used when an aggregate of tasks fails with multiple errors.
NSInteger const kBFMultipleErrorsError = 80175001;
// Exception name used when an aggregate of tasks fails with multiple exceptions.
NSString *const BFTaskMultipleExceptionsException = @"BFMultipleExceptionsException";
// Class extension: redeclares the public state flags as readwrite and holds
// the private storage and synchronization primitives.
@interface BFTask () {
    id _result;              // value the task completed with (valid once completed)
    NSError *_error;         // error the task faulted with, if any
    NSException *_exception; // exception the task faulted with, if any
}
@property (nonatomic, assign, readwrite, getter=isCancelled) BOOL cancelled;
@property (nonatomic, assign, readwrite, getter=isFaulted) BOOL faulted;
@property (nonatomic, assign, readwrite, getter=isCompleted) BOOL completed;
// Monitor object guarding all mutable task state (used with @synchronized).
@property (nonatomic, strong) NSObject *lock;
// Condition used to wake threads blocked in -waitUntilFinished.
@property (nonatomic, strong) NSCondition *condition;
// Continuation blocks queued to run when the task completes.
@property (nonatomic, strong) NSMutableArray *callbacks;
@end
@implementation BFTask
#pragma mark - Initializer
// Designated initializer: creates a pending (not yet completed) task.
- (instancetype)init {
    self = [super init];
    if (!self) return nil;
    _lock = [[NSObject alloc] init];
    _condition = [[NSCondition alloc] init];
    _callbacks = [NSMutableArray array];
    return self;
}

// Creates a task already completed with `result`.
// NOTE(review): these convenience initializers do not call -init, so _lock,
// _condition and _callbacks stay nil here; trySet* relies on @synchronized(nil)
// and messaging nil being no-ops — confirm intentional.
- (instancetype)initWithResult:(id)result {
    self = [super init];
    if (!self) return nil;
    [self trySetResult:result];
    return self;
}

// Creates a task already faulted with `error`.
- (instancetype)initWithError:(NSError *)error {
    self = [super init];
    if (!self) return nil;
    [self trySetError:error];
    return self;
}

// Creates a task already faulted with `exception`.
- (instancetype)initWithException:(NSException *)exception {
    self = [super init];
    if (!self) return nil;
    [self trySetException:exception];
    return self;
}

// Creates a task already in the cancelled state.
- (instancetype)initCancelled {
    self = [super init];
    if (!self) return nil;
    [self trySetCancelled];
    return self;
}
#pragma mark - Task Class methods

// Returns a task already completed with the given result.
+ (instancetype)taskWithResult:(id)result {
    return [[self alloc] initWithResult:result];
}

// Returns a task already faulted with the given error.
+ (instancetype)taskWithError:(NSError *)error {
    return [[self alloc] initWithError:error];
}

// Returns a task already faulted with the given exception.
+ (instancetype)taskWithException:(NSException *)exception {
    return [[self alloc] initWithException:exception];
}

// Returns a task already in the cancelled state.
+ (instancetype)cancelledTask {
    return [[self alloc] initCancelled];
}

// Returns a task that settles once every task in `tasks` has settled:
// faulted if any input faulted (aggregating all errors/exceptions),
// cancelled if none faulted but at least one was cancelled, otherwise
// completed with a nil result.
+ (instancetype)taskForCompletionOfAllTasks:(NSArray<BFTask *> *)tasks {
    // Remaining-task counter, decremented atomically from continuation blocks.
    __block int32_t total = (int32_t)tasks.count;
    if (total == 0) {
        return [self taskWithResult:nil];
    }
    __block int32_t cancelled = 0;
    NSObject *lock = [[NSObject alloc] init];
    NSMutableArray *errors = [NSMutableArray array];
    NSMutableArray *exceptions = [NSMutableArray array];
    BFTaskCompletionSource *tcs = [BFTaskCompletionSource taskCompletionSource];
    for (BFTask *task in tasks) {
        [task continueWithBlock:^id(BFTask *task) {
            if (task.exception) {
                @synchronized (lock) {
                    [exceptions addObject:task.exception];
                }
            } else if (task.error) {
                @synchronized (lock) {
                    [errors addObject:task.error];
                }
            } else if (task.cancelled) {
                OSAtomicIncrement32(&cancelled);
            }
            // The last continuation to run settles the aggregate task.
            if (OSAtomicDecrement32(&total) == 0) {
                if (exceptions.count > 0) {
                    if (exceptions.count == 1) {
                        tcs.exception = [exceptions firstObject];
                    } else {
                        NSException *exception =
                        [NSException exceptionWithName:BFTaskMultipleExceptionsException
                                                reason:@"There were multiple exceptions."
                                              userInfo:@{ @"exceptions": exceptions }];
                        tcs.exception = exception;
                    }
                } else if (errors.count > 0) {
                    if (errors.count == 1) {
                        tcs.error = [errors firstObject];
                    } else {
                        NSError *error = [NSError errorWithDomain:BFTaskErrorDomain
                                                             code:kBFMultipleErrorsError
                                                         userInfo:@{ @"errors": errors }];
                        tcs.error = error;
                    }
                } else if (cancelled > 0) {
                    [tcs cancel];
                } else {
                    tcs.result = nil;
                }
            }
            return nil;
        }];
    }
    return tcs.task;
}

// Like +taskForCompletionOfAllTasks:, but the aggregate task's result is the
// array of the individual tasks' results (via -valueForKey: on the array).
+ (instancetype)taskForCompletionOfAllTasksWithResults:(NSArray<BFTask *> *)tasks {
    return [[self taskForCompletionOfAllTasks:tasks] continueWithSuccessBlock:^id(BFTask *task) {
        return [tasks valueForKey:@"result"];
    }];
}

// Returns a task that completes with a nil result after `millis` milliseconds,
// scheduled with dispatch_after on the default-priority global queue.
+ (instancetype)taskWithDelay:(int)millis {
    BFTaskCompletionSource *tcs = [BFTaskCompletionSource taskCompletionSource];
    dispatch_time_t popTime = dispatch_time(DISPATCH_TIME_NOW, millis * NSEC_PER_MSEC);
    dispatch_after(popTime, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){
        tcs.result = nil;
    });
    return tcs.task;
}

// Delay variant honoring a cancellation token: returns a cancelled task
// immediately if the token is already cancelled, or cancels at fire time if
// the token was cancelled while waiting.
+ (instancetype)taskWithDelay:(int)millis
            cancellationToken:(BFCancellationToken *)token {
    if (token.cancellationRequested) {
        return [BFTask cancelledTask];
    }
    BFTaskCompletionSource *tcs = [BFTaskCompletionSource taskCompletionSource];
    dispatch_time_t popTime = dispatch_time(DISPATCH_TIME_NOW, millis * NSEC_PER_MSEC);
    dispatch_after(popTime, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void){
        if (token.cancellationRequested) {
            [tcs cancel];
            return;
        }
        tcs.result = nil;
    });
    return tcs.task;
}

// Runs `block` asynchronously on `executor`; the returned task mirrors the
// value (or task) the block produces.
+ (instancetype)taskFromExecutor:(BFExecutor *)executor withBlock:(nullable id (^)())block {
    return [[self taskWithResult:nil] continueWithExecutor:executor withBlock:^id(BFTask *task) {
        return block();
    }];
}
#pragma mark - Custom Setters/Getters

// All accessors below synchronize on self.lock so readers observe a
// consistent completed/result/error/exception/cancelled snapshot, and so a
// task can only be settled once (the first trySet* wins).

- (id)result {
    @synchronized(self.lock) {
        return _result;
    }
}

// Completes the task with `result`. Returns NO if the task was already
// settled; otherwise marks it completed and fires queued continuations.
- (BOOL)trySetResult:(id)result {
    @synchronized(self.lock) {
        if (self.completed) {
            return NO;
        }
        self.completed = YES;
        _result = result;
        [self runContinuations];
        return YES;
    }
}

- (NSError *)error {
    @synchronized(self.lock) {
        return _error;
    }
}

// Faults the task with `error`. Returns NO if already settled.
- (BOOL)trySetError:(NSError *)error {
    @synchronized(self.lock) {
        if (self.completed) {
            return NO;
        }
        self.completed = YES;
        self.faulted = YES;
        _error = error;
        [self runContinuations];
        return YES;
    }
}

- (NSException *)exception {
    @synchronized(self.lock) {
        return _exception;
    }
}

// Faults the task with `exception`. Returns NO if already settled.
- (BOOL)trySetException:(NSException *)exception {
    @synchronized(self.lock) {
        if (self.completed) {
            return NO;
        }
        self.completed = YES;
        self.faulted = YES;
        _exception = exception;
        [self runContinuations];
        return YES;
    }
}

- (BOOL)isCancelled {
    @synchronized(self.lock) {
        return _cancelled;
    }
}

- (BOOL)isFaulted {
    @synchronized(self.lock) {
        return _faulted;
    }
}

// Cancels the task. Returns NO if already settled.
- (BOOL)trySetCancelled {
    @synchronized(self.lock) {
        if (self.completed) {
            return NO;
        }
        self.completed = YES;
        self.cancelled = YES;
        [self runContinuations];
        return YES;
    }
}

- (BOOL)isCompleted {
    @synchronized(self.lock) {
        return _completed;
    }
}

// Directly marks the task completed without a result or continuations.
// NOTE(review): bypasses runContinuations — confirm this is only used
// internally where continuations are handled separately.
- (void)setCompleted {
    @synchronized(self.lock) {
        _completed = YES;
    }
}

// Wakes any threads blocked in -waitUntilFinished, then runs and clears all
// queued continuation blocks. Called with the task already settled.
- (void)runContinuations {
    @synchronized(self.lock) {
        [self.condition lock];
        [self.condition broadcast];
        [self.condition unlock];
        for (void (^callback)() in self.callbacks) {
            callback();
        }
        [self.callbacks removeAllObjects];
    }
}
#pragma mark - Chaining methods

// Schedules `block` on `executor` once this task completes; returns a task
// representing the block's outcome.
- (BFTask *)continueWithExecutor:(BFExecutor *)executor
                       withBlock:(BFContinuationBlock)block {
    return [self continueWithExecutor:executor block:block cancellationToken:nil];
}

// Core continuation primitive. Runs `block` on `executor` after this task
// completes (immediately if it already has). The returned task:
//   - is cancelled if `cancellationToken` is cancelled before the block runs;
//   - faults with any exception the block throws;
//   - if the block returns a BFTask, mirrors that task's eventual outcome;
//   - otherwise completes with the block's return value.
- (BFTask *)continueWithExecutor:(BFExecutor *)executor
                           block:(BFContinuationBlock)block
               cancellationToken:(BFCancellationToken *)cancellationToken {
    BFTaskCompletionSource *tcs = [BFTaskCompletionSource taskCompletionSource];
    // Capture all of the state that needs to be used when the continuation is complete.
    void (^wrappedBlock)() = ^() {
        [executor execute:^{
            if (cancellationToken.cancellationRequested) {
                [tcs cancel];
                return;
            }
            id result = nil;
            @try {
                result = block(self);
            } @catch (NSException *exception) {
                tcs.exception = exception;
                return;
            }
            if ([result isKindOfClass:[BFTask class]]) {
                // The block returned another task: forward its outcome to tcs.
                id (^setupWithTask) (BFTask *) = ^id(BFTask *task) {
                    if (cancellationToken.cancellationRequested || task.cancelled) {
                        [tcs cancel];
                    } else if (task.exception) {
                        tcs.exception = task.exception;
                    } else if (task.error) {
                        tcs.error = task.error;
                    } else {
                        tcs.result = task.result;
                    }
                    return nil;
                };
                BFTask *resultTask = (BFTask *)result;
                if (resultTask.completed) {
                    setupWithTask(resultTask);
                } else {
                    [resultTask continueWithBlock:setupWithTask];
                }
            } else {
                tcs.result = result;
            }
        }];
    };
    // Run now if already completed; otherwise queue for runContinuations.
    BOOL completed;
    @synchronized(self.lock) {
        completed = self.completed;
        if (!completed) {
            [self.callbacks addObject:[wrappedBlock copy]];
        }
    }
    if (completed) {
        wrappedBlock();
    }
    return tcs.task;
}

// Continuation on the default executor.
- (BFTask *)continueWithBlock:(BFContinuationBlock)block {
    return [self continueWithExecutor:[BFExecutor defaultExecutor] block:block cancellationToken:nil];
}

// Continuation on the default executor with a cancellation token.
- (BFTask *)continueWithBlock:(BFContinuationBlock)block
            cancellationToken:(BFCancellationToken *)cancellationToken {
    return [self continueWithExecutor:[BFExecutor defaultExecutor] block:block cancellationToken:cancellationToken];
}

// Success-only continuation: `block` runs only if this task neither faulted
// nor was cancelled; faults/cancellation propagate unchanged.
- (BFTask *)continueWithExecutor:(BFExecutor *)executor
                withSuccessBlock:(BFContinuationBlock)block {
    return [self continueWithExecutor:executor successBlock:block cancellationToken:nil];
}

// Success-only continuation with a cancellation token.
- (BFTask *)continueWithExecutor:(BFExecutor *)executor
                    successBlock:(BFContinuationBlock)block
               cancellationToken:(BFCancellationToken *)cancellationToken {
    if (cancellationToken.cancellationRequested) {
        return [BFTask cancelledTask];
    }
    return [self continueWithExecutor:executor block:^id(BFTask *task) {
        if (task.faulted || task.cancelled) {
            return task;
        } else {
            return block(task);
        }
    } cancellationToken:cancellationToken];
}

// Success-only continuation on the default executor.
- (BFTask *)continueWithSuccessBlock:(BFContinuationBlock)block {
    return [self continueWithExecutor:[BFExecutor defaultExecutor] successBlock:block cancellationToken:nil];
}

// Success-only continuation on the default executor with a cancellation token.
- (BFTask *)continueWithSuccessBlock:(BFContinuationBlock)block
                   cancellationToken:(BFCancellationToken *)cancellationToken {
    return [self continueWithExecutor:[BFExecutor defaultExecutor] successBlock:block cancellationToken:cancellationToken];
}
#pragma mark - Syncing Task (Avoid it)

// Instance-level hook for the main-thread warning; separate method so it can
// be overridden or stubbed.
- (void)warnOperationOnMainThread {
    warnBlockingOperationOnMainThread();
}

// Blocks the calling thread until the task settles. Logs a warning when
// called from the main thread. The condition is locked while still holding
// self.lock, so a completion racing with this call cannot broadcast before
// this thread reaches -wait.
// NOTE(review): there is no predicate re-check loop around -wait, so a
// spurious wakeup would return before completion — confirm acceptable.
- (void)waitUntilFinished {
    if ([NSThread isMainThread]) {
        [self warnOperationOnMainThread];
    }
    @synchronized(self.lock) {
        if (self.completed) {
            return;
        }
        [self.condition lock];
    }
    [self.condition wait];
    [self.condition unlock];
}

#pragma mark - NSObject

// Debug description with completion state and, when completed, the result.
- (NSString *)description {
    // Acquire the data from the locked properties
    BOOL completed;
    BOOL cancelled;
    BOOL faulted;
    NSString *resultDescription = nil;
    @synchronized(self.lock) {
        completed = self.completed;
        cancelled = self.cancelled;
        faulted = self.faulted;
        resultDescription = completed ? [NSString stringWithFormat:@" result = %@", self.result] : @"";
    }
    // Description string includes status information and, if available, the
    // result since in some ways this is what a promise actually "is".
    return [NSString stringWithFormat:@"<%@: %p; completed = %@; cancelled = %@; faulted = %@;%@>",
            NSStringFromClass([self class]),
            self,
            completed ? @"YES" : @"NO",
            cancelled ? @"YES" : @"NO",
            faulted ? @"YES" : @"NO",
            resultDescription];
}
@end
| {
"pile_set_name": "Github"
} |
// Jest configuration for the "workspace" project.
module.exports = {
  name: 'workspace',
  // Inherit shared settings from the repository-root Jest config.
  preset: '../../jest.config.js',
  transform: {
    // Compile .ts/.tsx/.js/.jsx test files with ts-jest.
    '^.+\\.[tj]sx?$': 'ts-jest',
  },
  moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'html'],
  // NOTE(review): 'tsConfig' is the legacy ts-jest option spelling; newer
  // ts-jest versions expect 'tsconfig' — confirm against the pinned version.
  globals: { 'ts-jest': { tsConfig: '<rootDir>/tsconfig.spec.json' } },
};
| {
"pile_set_name": "Github"
} |
// Test double for the platform provider: always reports a browser
// environment so browser-only code paths are exercised in unit tests.
export class PlatformProviderMock {
  // Mirrors the real provider's isBrowser getter; fixed to true for tests.
  get isBrowser() {
    return true;
  }
}
| {
"pile_set_name": "Github"
} |
# FLAC - Free Lossless Audio Codec
# Copyright (C) 2001-2009 Josh Coalson
# Copyright (C) 2011-2016 Xiph.Org Foundation
#
# This file is part of the FLAC project. FLAC is comprised of several
# components distributed under different licenses. The codec libraries
# are distributed under Xiph.Org's BSD-like license (see the file
# COPYING.Xiph in this distribution). All other programs, libraries, and
# plugins are distributed under the GPL (see COPYING.GPL). The documentation
# is distributed under the Gnu FDL (see COPYING.FDL). Each file in the
# FLAC distribution contains at the top the terms under which it may be
# distributed.
#
# Since this particular file is relevant to all components of FLAC,
# it may be distributed under the Xiph.Org license, which is the least
# restrictive of those mentioned above. See the file COPYING.Xiph in this
# distribution.
#
# customizable settings from the make invocation
# (each may be overridden on the command line, e.g. `make USE_OGG=0`)
#
USE_OGG ?= 1
USE_ICONV ?= 1
USE_LROUND ?= 1
USE_FSEEKO ?= 1
USE_LANGINFO_CODESET ?= 1
#
# debug/release selection
#
DEFAULT_BUILD = release
F_PIC := -fPIC
# returns Linux, Darwin, FreeBSD, etc.
ifndef OS
OS := $(shell uname -s)
endif
# returns i386, x86_64, powerpc, etc.
ifndef PROC
ifeq ($(findstring Windows,$(OS)),Windows)
PROC := i386 # failsafe
# ifeq ($(findstring i686,$(shell gcc -dumpmachine)),i686) # MinGW-w64: i686-w64-mingw32
ifeq ($(findstring x86_64,$(shell gcc -dumpmachine)),x86_64) # MinGW-w64: x86_64-w64-mingw32
PROC := x86_64
endif
else
ifeq ($(shell uname -p),amd64)
PROC := x86_64
else
PROC := $(shell uname -p)
endif
endif
endif
ifeq ($(PROC),powerpc)
PROC := ppc
endif
# x64_64 Mac OS outputs 'i386' in uname -p; use uname -m instead
ifeq ($(PROC),i386)
ifeq ($(OS),Darwin)
PROC := $(shell uname -m)
endif
endif
ifeq ($(OS),Linux)
PROC := $(shell uname -m)
USE_ICONV := 0
endif
# Windows builds: no -fPIC, and neither iconv nor langinfo are available.
ifeq ($(findstring Windows,$(OS)),Windows)
F_PIC :=
USE_ICONV := 0
USE_LANGINFO_CODESET := 0
ifeq (mingw32,$(shell gcc -dumpmachine)) # MinGW (mainline): mingw32
USE_FSEEKO := 0
endif
endif
# Target-specific variables: select build flavor per goal.
debug : BUILD = debug
valgrind : BUILD = debug
release : BUILD = release
# override LINKAGE on OS X until we figure out how to get 'cc -static' to work
ifeq ($(OS),Darwin)
LINKAGE = -arch $(PROC)
else
debug : LINKAGE = -static
valgrind : LINKAGE = -dynamic
release : LINKAGE = -static
endif
all default: $(DEFAULT_BUILD)
#
# GNU makefile fragment for emulating stuff normally done by configure
#
VERSION=\"1.3.3\"
CONFIG_CFLAGS=$(CUSTOM_CFLAGS) -DHAVE_STDINT_H -DHAVE_INTTYPES_H -DHAVE_CXX_VARARRAYS -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64
ifeq ($(OS),Darwin)
CONFIG_CFLAGS += -DFLAC__SYS_DARWIN -DHAVE_SYS_PARAM_H -arch $(PROC)
else
CONFIG_CFLAGS += -DHAVE_SOCKLEN_T
endif
# Endianness: only ppc targets are treated as big-endian here.
ifeq ($(PROC),ppc)
CONFIG_CFLAGS += -DWORDS_BIGENDIAN=1 -DCPU_IS_LITTLE_ENDIAN=0
else
CONFIG_CFLAGS += -DWORDS_BIGENDIAN=0 -DCPU_IS_LITTLE_ENDIAN=1
endif
ifeq ($(OS),Linux)
ifeq ($(PROC),x86_64)
CONFIG_CFLAGS += -fPIC
endif
endif
ifeq ($(OS),FreeBSD)
CONFIG_CFLAGS += -DHAVE_SYS_PARAM_H
endif
ifneq (0,$(USE_ICONV))
CONFIG_CFLAGS += -DHAVE_ICONV
ICONV_LIBS = -liconv
else
ICONV_LIBS =
endif
ifneq (0,$(USE_OGG))
CONFIG_CFLAGS += -DFLAC__HAS_OGG=1
OGG_INCLUDES = -I$(OGG_INCLUDE_DIR)
OGG_EXPLICIT_LIBS = $(OGG_LIB_DIR)/libogg.a
OGG_LIBS = -L$(OGG_LIB_DIR) -logg
OGG_SRCS = $(OGG_SRCS_C)
else
CONFIG_CFLAGS += -DFLAC__HAS_OGG=0
OGG_INCLUDES =
OGG_EXPLICIT_LIBS =
OGG_LIBS =
OGG_SRCS =
endif
# libogg install location (recursively-expanded vars above pick these up
# even though they are assigned later).
OGG_INCLUDE_DIR=$(HOME)/local/include
OGG_LIB_DIR=$(HOME)/local/lib
ifneq (0,$(USE_LROUND))
CONFIG_CFLAGS += -DHAVE_LROUND
endif
ifneq (0,$(USE_FSEEKO))
CONFIG_CFLAGS += -DHAVE_FSEEKO
endif
ifneq (0,$(USE_LANGINFO_CODESET))
CONFIG_CFLAGS += -DHAVE_LANGINFO_CODESET
endif
| {
"pile_set_name": "Github"
} |
using System;
namespace HotChocolate.Types
{
    /// <summary>
    /// Fluent descriptor for registering additional type-system dependencies
    /// after an initial dependency has been declared. Each method returns the
    /// descriptor itself so calls can be chained.
    /// </summary>
    public interface ICompletedDependencyDescriptor
    {
        /// <summary>Depends on the type system member <typeparamref name="T"/>.</summary>
        ICompletedDependencyDescriptor DependsOn<T>()
            where T : ITypeSystemMember;

        /// <summary>
        /// Depends on <typeparamref name="T"/>; when
        /// <paramref name="mustBeCompleted"/> is true the dependency must be
        /// fully completed before this type is completed.
        /// </summary>
        ICompletedDependencyDescriptor DependsOn<T>(bool mustBeCompleted)
            where T : ITypeSystemMember;

        /// <summary>Depends on the schema type given by <paramref name="schemaType"/>.</summary>
        ICompletedDependencyDescriptor DependsOn(Type schemaType);

        /// <summary>Depends on <paramref name="schemaType"/>, optionally requiring completion.</summary>
        ICompletedDependencyDescriptor DependsOn(
            Type schemaType, bool mustBeCompleted);

        /// <summary>Depends on the type with the given schema name.</summary>
        ICompletedDependencyDescriptor DependsOn(NameString typeName);

        /// <summary>Depends on the named type, optionally requiring completion.</summary>
        ICompletedDependencyDescriptor DependsOn(
            NameString typeName,
            bool mustBeCompleted);
    }
}
| {
"pile_set_name": "Github"
} |
// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html#License
qu{
Currencies{
PEN{
"S/",
"PEN",
}
}
CurrencyUnitPatterns{
other{"{0} {1}"}
}
Version{"2.1.31.33"}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package precis
import (
"testing"
"golang.org/x/text/runes"
)
// Compile-time regression test to ensure that Class is a Set
var _ runes.Set = (*class)(nil)
// Ensure that certain characters are (or are not) in the identifier class.
// TestClassContains spot-checks membership of selected runes in the
// identifier and freeform PRECIS classes.
func TestClassContains(t *testing.T) {
	cases := []struct {
		name       string
		class      *class
		allowed    []rune
		disallowed []rune
	}{
		{
			name:       "Identifier",
			class:      identifier,
			allowed:    []rune("Aa0\u0021\u007e\u00df\u3007"),
			disallowed: []rune("\u2150\u2100\u2200\u3164\u2190\u2600\u303b\u1e9b"),
		},
		{
			name:       "Freeform",
			class:      freeform,
			allowed:    []rune("Aa0\u0021\u007e\u00df\u3007 \u2150\u2100\u2200\u2190\u2600\u1e9b"),
			disallowed: []rune("\u3164\u303b"),
		},
	}
	for _, tc := range cases {
		// Every allowed rune must be a member of the class.
		for _, r := range tc.allowed {
			if !tc.class.Contains(r) {
				t.Errorf("Class %s should contain %U", tc.name, r)
			}
		}
		// Every disallowed rune must be rejected by the class.
		for _, r := range tc.disallowed {
			if tc.class.Contains(r) {
				t.Errorf("Class %s should not contain %U", tc.name, r)
			}
		}
	}
}
| {
"pile_set_name": "Github"
} |
-- Regression test for Terra's boolean `not` operator and its precedence
-- relative to `==`: `not x == y` parses as `(not x) == y`.
local T = true
local F = false
terra check()
var a = true
return not true == false
end
-- (not true) == false  ->  true
assert(check() == true)
terra check2()
return not T == F
end
-- (not T) == F  ->  true
assert(check2() == true)
terra check3()
return not T == not not T
end
-- (not T) == (not not T)  ->  false == true  ->  false
assert(check3() == false)
terra check4()
return not not T == T and not not F == F and true == T and false == F
end
-- double negation is the identity on booleans
assert(check4() == true)
terra foo() return not false end
assert(foo() == true) | {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2008-2017 Nelson Carpentier, Jakub Białek
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
package com.google.code.ssm.aop;
import java.lang.reflect.Method;
import java.security.InvalidParameterException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.AfterReturning;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.code.ssm.aop.support.AnnotationData;
import com.google.code.ssm.aop.support.AnnotationDataBuilder;
import com.google.code.ssm.aop.support.InvalidAnnotationException;
import com.google.code.ssm.api.UpdateMultiCache;
import com.google.code.ssm.api.UpdateMultiCacheOption;
import com.google.code.ssm.api.format.SerializationType;
import com.google.code.ssm.util.Utils;
/**
*
* @author Nelson Carpentier
* @author Jakub Białek
*
*/
@Aspect
public class UpdateMultiCacheAdvice extends MultiCacheAdvice {
private static final Logger LOG = LoggerFactory.getLogger(UpdateMultiCacheAdvice.class);
@Pointcut("@annotation(com.google.code.ssm.api.UpdateMultiCache)")
public void updateMulti() {
}
@AfterReturning(pointcut = "updateMulti()", returning = "retVal")
public void cacheUpdateMulti(final JoinPoint jp, final Object retVal) throws Throwable {
if (isDisabled()) {
getLogger().info("Cache disabled");
return;
}
// For Update*Cache, an AfterReturning aspect is fine. We will only
// apply our caching after the underlying method completes successfully, and we will have
// the same access to the method params.
try {
final Method methodToCache = getCacheBase().getMethodToCache(jp, UpdateMultiCache.class);
final UpdateMultiCache annotation = methodToCache.getAnnotation(UpdateMultiCache.class);
final AnnotationData data = AnnotationDataBuilder.buildAnnotationData(annotation, UpdateMultiCache.class, methodToCache);
final List<Object> dataList = getCacheBase().<List<Object>> getUpdateData(data, methodToCache, jp.getArgs(), retVal);
final SerializationType serializationType = getCacheBase().getSerializationType(methodToCache);
final MultiCacheCoordinator coord = new MultiCacheCoordinator(methodToCache, data);
coord.setAddNullsToCache(annotation.option().addNullsToCache());
final List<String> cacheKeys;
if (data.isReturnKeyIndex()) {
@SuppressWarnings("unchecked")
final List<Object> keyObjects = (List<Object>) retVal;
coord.setHolder(convertIdObjectsToKeyMap(keyObjects, data));
cacheKeys = getCacheBase().getCacheKeyBuilder().getCacheKeys(keyObjects, data.getNamespace());
} else {
// Create key->object and object->key mappings.
coord.setHolder(createObjectIdCacheKeyMapping(coord.getAnnotationData(), jp.getArgs(), coord.getMethod()));
@SuppressWarnings("unchecked")
List<Object> listKeyObjects = (List<Object>) Utils.getMethodArg(data.getListIndexInMethodArgs(), jp.getArgs(),
methodToCache.toString());
coord.setListKeyObjects(listKeyObjects);
// keySet is sorted
cacheKeys = new ArrayList<String>(coord.getKey2Obj().keySet());
}
if (!annotation.option().addNullsToCache()) {
updateCache(cacheKeys, dataList, methodToCache, data, serializationType);
} else {
Map<String, Object> key2Result = new HashMap<String, Object>();
for (String cacheKey : cacheKeys) {
key2Result.put(cacheKey, null);
}
coord.setInitialKey2Result(key2Result);
updateCacheWithMissed(dataList, coord, annotation.option(), serializationType);
}
} catch (Exception ex) {
warn(ex, "Updating caching via %s aborted due to an error.", jp.toShortString());
}
}
MapHolder convertIdObjectsToKeyMap(final List<Object> idObjects, final AnnotationData data) throws Exception {
final MapHolder holder = new MapHolder();
for (final Object obj : idObjects) {
if (obj == null) {
throw new InvalidParameterException("One of the passed in key objects is null");
}
String cacheKey = getCacheBase().getCacheKeyBuilder().getCacheKey(obj, data.getNamespace());
if (holder.getObj2Key().get(obj) == null) {
holder.getObj2Key().put(obj, cacheKey);
}
if (holder.getKey2Obj().get(cacheKey) == null) {
holder.getKey2Obj().put(cacheKey, obj);
}
}
return holder;
}
/**
 * Write each returned object into the cache under its positionally matching key.
 *
 * @param cacheKeys cache keys, one per returned object, in matching order
 * @param returnList objects returned by the cached method
 * @param methodToCache method whose results are being cached (for error reporting)
 * @param data annotation data supplying cache and expiration settings
 * @param serializationType serialization to use for stored values
 * @throws InvalidAnnotationException if the two lists differ in size
 */
void updateCache(final List<String> cacheKeys, final List<Object> returnList, final Method methodToCache, final AnnotationData data,
        final SerializationType serializationType) {
    if (returnList.size() != cacheKeys.size()) {
        throw new InvalidAnnotationException(String.format(
                "The key generation objects, and the resulting objects do not match in size for [%s].", methodToCache.toString()));
    }
    for (int ix = 0; ix < cacheKeys.size(); ix++) {
        // getSubmission wraps/normalizes the value (e.g. null handling) before storage.
        final Object cacheObject = getCacheBase().getSubmission(returnList.get(ix));
        getCacheBase().getCache(data).setSilently(cacheKeys.get(ix), data.getExpiration(), cacheObject, serializationType);
    }
}
/**
 * Cache the freshly produced results, then fill (or overwrite) cache slots for any
 * objects that produced no result, according to the annotation's null-handling option.
 *
 * @param dataUpdateContents results to store; may be empty
 * @param coord coordinator tracking key/object mappings and missed objects
 * @param option controls whether existing non-null entries may be overwritten with nulls
 * @param serializationType serialization to use for stored values
 * @throws Exception if cache key generation or storage fails
 */
private void updateCacheWithMissed(final List<Object> dataUpdateContents, final MultiCacheCoordinator coord,
        final UpdateMultiCacheOption option, final SerializationType serializationType) throws Exception {
    if (!dataUpdateContents.isEmpty()) {
        final List<String> cacheKeys = getCacheBase().getCacheKeyBuilder().getCacheKeys(dataUpdateContents,
                coord.getAnnotationData().getNamespace());
        int ix = 0;
        for (final Object resultObject : dataUpdateContents) {
            final String cacheKey = cacheKeys.get(ix++);
            getCacheBase().getCache(coord.getAnnotationData()).setSilently(cacheKey, coord.getAnnotationData().getExpiration(),
                    resultObject, serializationType);
            // A stored result means this key object is no longer "missed".
            coord.getMissedObjects().remove(coord.getKey2Obj().get(cacheKey));
        }
    }
    // Remaining missed objects get null placeholders; the option decides whether
    // existing cached values may be clobbered or only absent entries filled.
    if (option.overwriteNoNulls()) {
        setNullValues(coord.getMissedObjects(), coord, serializationType);
    } else {
        addNullValues(coord.getMissedObjects(), coord, serializationType);
    }
}
// Supplies this class's logger to the superclass's logging helpers
// (presumably used by warn(...) above — defined in the base class).
@Override
protected Logger getLogger() {
    return LOG;
}
}
| {
"pile_set_name": "Github"
} |
{
"_args": [
[
"babel-plugin-transform-es2015-shorthand-properties@^6.3.13",
"C:\\code\\git-oa\\wechat-devtools\\src\\node_modules\\babel-preset-es2015"
]
],
"_cnpm_publish_time": 1462232689061,
"_from": "babel-plugin-transform-es2015-shorthand-properties@>=6.3.13 <7.0.0",
"_id": "[email protected]",
"_inCache": true,
"_installable": true,
"_location": "/babel-plugin-transform-es2015-shorthand-properties",
"_nodeVersion": "5.1.0",
"_npmOperationalInternal": {
"host": "packages-16-east.internal.npmjs.com",
"tmp": "tmp/babel-plugin-transform-es2015-shorthand-properties-6.8.0.tgz_1462232688001_0.6746715707704425"
},
"_npmUser": {
"email": "[email protected]",
"name": "hzoo"
},
"_npmVersion": "3.8.6",
"_phantomChildren": {},
"_requested": {
"name": "babel-plugin-transform-es2015-shorthand-properties",
"raw": "babel-plugin-transform-es2015-shorthand-properties@^6.3.13",
"rawSpec": "^6.3.13",
"scope": null,
"spec": ">=6.3.13 <7.0.0",
"type": "range"
},
"_requiredBy": [
"/babel-preset-es2015"
],
"_resolved": "http://r.tnpm.oa.com/babel-plugin-transform-es2015-shorthand-properties/download/babel-plugin-transform-es2015-shorthand-properties-6.8.0.tgz",
"_shasum": "f0a4c5fd471630acf333c2d99c3d677bf0952149",
"_shrinkwrap": null,
"_spec": "babel-plugin-transform-es2015-shorthand-properties@^6.3.13",
"_where": "C:\\code\\git-oa\\wechat-devtools\\src\\node_modules\\babel-preset-es2015",
"dependencies": {
"babel-runtime": "^6.0.0",
"babel-types": "^6.8.0"
},
"description": "Compile ES2015 shorthand properties to ES5",
"devDependencies": {
"babel-helper-plugin-test-runner": "^6.8.0"
},
"directories": {},
"dist": {
"key": "/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.8.0.tgz",
"noattachment": false,
"shasum": "f0a4c5fd471630acf333c2d99c3d677bf0952149",
"size": 1119,
"tarball": "http://r.tnpm.oa.com/babel-plugin-transform-es2015-shorthand-properties/download/babel-plugin-transform-es2015-shorthand-properties-6.8.0.tgz"
},
"keywords": [
"babel-plugin"
],
"license": "MIT",
"main": "lib/index.js",
"maintainers": [
{
"email": "[email protected]",
"name": "amasad"
},
{
"email": "[email protected]",
"name": "hzoo"
},
{
"email": "[email protected]",
"name": "jmm"
},
{
"email": "[email protected]",
"name": "loganfsmyth"
},
{
"email": "[email protected]",
"name": "sebmck"
},
{
"email": "[email protected]",
"name": "thejameskyle"
}
],
"name": "babel-plugin-transform-es2015-shorthand-properties",
"optionalDependencies": {},
"publish_time": 1462232689061,
"readme": "ERROR: No README data found!",
"repository": {
"type": "git",
"url": "https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-es2015-shorthand-properties"
},
"scripts": {},
"version": "6.8.0"
}
| {
"pile_set_name": "Github"
} |
var common = require('./common');
var fs = require('fs');
var path = require('path');
// POSIX permission bit masks (see chmod(2)). "Other" bits occupy the low three
// bits; group bits sit 3 bits up, owner bits 6 bits up; the special mode bits
// (setuid/setgid/sticky) and the file-type mask live above those.
var PERMS = (function () {
  var READ = 4;
  var WRITE = 2;
  var EXEC = 1;
  return {
    OTHER_EXEC  : EXEC,
    OTHER_WRITE : WRITE,
    OTHER_READ  : READ,
    GROUP_EXEC  : EXEC << 3,
    GROUP_WRITE : WRITE << 3,
    GROUP_READ  : READ << 3,
    OWNER_EXEC  : EXEC << 6,
    OWNER_WRITE : WRITE << 6,
    OWNER_READ  : READ << 6,
    // Literal octal numbers are apparently not allowed in "strict" javascript. Using parseInt is
    // the preferred way, else a jshint warning is thrown.
    STICKY    : parseInt('01000', 8),
    SETGID    : parseInt('02000', 8),
    SETUID    : parseInt('04000', 8),
    TYPE_MASK : parseInt('0770000', 8)
  };
})();
//@
//@ ### chmod(octal_mode || octal_string, file)
//@ ### chmod(symbolic_mode, file)
//@
//@ Available options:
//@
//@ + `-v`: output a diagnostic for every file processed
//@ + `-c`: like verbose but report only when a change is made
//@ + `-R`: change files and directories recursively
//@
//@ Examples:
//@
//@ ```javascript
//@ chmod(755, '/Users/brandon');
//@ chmod('755', '/Users/brandon'); // same as above
//@ chmod('u+x', '/Users/brandon');
//@ ```
//@
//@ Alters the permissions of a file or directory by either specifying the
//@ absolute permissions in octal form or expressing the changes in symbols.
//@ This command tries to mimic the POSIX behavior as much as possible.
//@ Notable exceptions:
//@
//@ + In symbolic modes, 'a-r' and '-r' are identical. No consideration is
//@ given to the umask.
//@ + There is no "quiet" option since default behavior is to run silent.
// Implementation of `chmod`. `options` holds dash-flags (-R, -c, -v), `mode` is
// either an octal number/string or a symbolic mode expression ("u+x,g-w"), and
// `filePattern` is a glob pattern or array of patterns.
function _chmod(options, mode, filePattern) {
  if (!filePattern) {
    if (options.length > 0 && options.charAt(0) === '-') {
      // Special case where the specified file permissions started with - to subtract perms, which
      // get picked up by the option parser as command flags.
      // If we are down by one argument and options starts with -, shift everything over.
      filePattern = mode;
      mode = options;
      options = '';
    }
    else {
      common.error('You must specify a file.');
    }
  }
  options = common.parseOptions(options, {
    'R': 'recursive',
    'c': 'changes',
    'v': 'verbose'
  });
  if (typeof filePattern === 'string') {
    filePattern = [ filePattern ];
  }
  var files;
  if (options.recursive) {
    // Depth-first expansion of the target tree; symlinks are skipped entirely
    // so recursion can neither follow nor modify them.
    files = [];
    common.expand(filePattern).forEach(function addFile(expandedFile) {
      var stat = fs.lstatSync(expandedFile);
      if (!stat.isSymbolicLink()) {
        files.push(expandedFile);
        if (stat.isDirectory()) { // intentionally does not follow symlinks.
          fs.readdirSync(expandedFile).forEach(function (child) {
            addFile(expandedFile + '/' + child);
          });
        }
      }
    });
  }
  else {
    files = common.expand(filePattern);
  }
  files.forEach(function innerChmod(file) {
    file = path.resolve(file);
    if (!fs.existsSync(file)) {
      common.error('File not found: ' + file);
    }
    // When recursing, don't follow symlinks.
    if (options.recursive && fs.lstatSync(file).isSymbolicLink()) {
      return;
    }
    var perms = fs.statSync(file).mode;
    // Preserve the file-type bits; only the permission bits are recomputed.
    var type = perms & PERMS.TYPE_MASK;
    var newPerms = perms;
    if (isNaN(parseInt(mode, 8))) {
      // Mode is symbolic (e.g. "u+x,go-w"): apply each comma-separated clause in turn.
      mode.split(',').forEach(function (symbolicMode) {
        /*jshint regexdash:true */
        var pattern = /([ugoa]*)([=\+-])([rwxXst]*)/i;
        var matches = pattern.exec(symbolicMode);
        if (matches) {
          var applyTo = matches[1];
          var operator = matches[2];
          var change = matches[3];
          // An empty who-part ('+x') behaves like 'a' (all), per POSIX.
          var changeOwner = applyTo.indexOf('u') != -1 || applyTo === 'a' || applyTo === '';
          var changeGroup = applyTo.indexOf('g') != -1 || applyTo === 'a' || applyTo === '';
          var changeOther = applyTo.indexOf('o') != -1 || applyTo === 'a' || applyTo === '';
          var changeRead = change.indexOf('r') != -1;
          var changeWrite = change.indexOf('w') != -1;
          var changeExec = change.indexOf('x') != -1;
          var changeSticky = change.indexOf('t') != -1;
          var changeSetuid = change.indexOf('s') != -1;
          // Accumulate the permission bits named by this clause into a single mask.
          var mask = 0;
          if (changeOwner) {
            mask |= (changeRead ? PERMS.OWNER_READ : 0) + (changeWrite ? PERMS.OWNER_WRITE : 0) + (changeExec ? PERMS.OWNER_EXEC : 0) + (changeSetuid ? PERMS.SETUID : 0);
          }
          if (changeGroup) {
            mask |= (changeRead ? PERMS.GROUP_READ : 0) + (changeWrite ? PERMS.GROUP_WRITE : 0) + (changeExec ? PERMS.GROUP_EXEC : 0) + (changeSetuid ? PERMS.SETGID : 0);
          }
          if (changeOther) {
            mask |= (changeRead ? PERMS.OTHER_READ : 0) + (changeWrite ? PERMS.OTHER_WRITE : 0) + (changeExec ? PERMS.OTHER_EXEC : 0);
          }
          // Sticky bit is special - it's not tied to user, group or other.
          if (changeSticky) {
            mask |= PERMS.STICKY;
          }
          switch (operator) {
            case '+':
              newPerms |= mask;
              break;
            case '-':
              newPerms &= ~mask;
              break;
            case '=':
              newPerms = type + mask;
              // According to POSIX, when using = to explicitly set the permissions, setuid and setgid can never be cleared.
              if (fs.statSync(file).isDirectory()) {
                newPerms |= (PERMS.SETUID + PERMS.SETGID) & perms;
              }
              break;
          }
          if (options.verbose) {
            log(file + ' -> ' + newPerms.toString(8));
          }
          if (perms != newPerms) {
            if (!options.verbose && options.changes) {
              log(file + ' -> ' + newPerms.toString(8));
            }
            fs.chmodSync(file, newPerms);
          }
        }
        else {
          common.error('Invalid symbolic mode change: ' + symbolicMode);
        }
      });
    }
    else {
      // they gave us a full number
      newPerms = type + parseInt(mode, 8);
      // POSIX rules are that setuid and setgid can only be added using numeric form, but not cleared.
      if (fs.statSync(file).isDirectory()) {
        newPerms |= (PERMS.SETUID + PERMS.SETGID) & perms;
      }
      fs.chmodSync(file, newPerms);
    }
  });
}
module.exports = _chmod;
| {
"pile_set_name": "Github"
} |
/*
* This file is part of ELKI:
* Environment for Developing KDD-Applications Supported by Index-Structures
*
* Copyright (C) 2019
* ELKI Development Team
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package elki.visualization.batikutil;
import java.awt.Component;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
/**
* Class to lazily process canvas resize events by applying a threshold.
*
* @author Erich Schubert
* @since 0.2
*/
public abstract class LazyCanvasResizer extends ComponentAdapter {
  /**
   * Default threshold for resizing.
   */
  public static final double DEFAULT_THRESHOLD = 0.05;

  /**
   * Minimum ratio change that must be exceeded before a resize is propagated.
   */
  double threshold;

  /**
   * Width/height ratio that was last applied to the canvas.
   */
  double activeRatio;

  /**
   * Component whose size is being tracked.
   */
  Component component;

  /**
   * Full constructor.
   *
   * @param component Component to track
   * @param threshold Threshold
   */
  public LazyCanvasResizer(Component component, double threshold) {
    super();
    this.component = component;
    this.threshold = threshold;
    this.activeRatio = getCurrentRatio();
  }

  /**
   * Simplified constructor using the default threshold {@link #DEFAULT_THRESHOLD}
   *
   * @param component Component to track.
   */
  public LazyCanvasResizer(Component component) {
    this(component, DEFAULT_THRESHOLD);
  }

  /**
   * React to a component resize event: recompute the ratio and only invoke
   * {@link #executeResize(double)} when it moved by more than the threshold.
   */
  @Override
  public void componentResized(ComponentEvent e) {
    if (e.getComponent() != component) {
      return;
    }
    final double ratio = getCurrentRatio();
    if (Math.abs(ratio - activeRatio) > threshold) {
      activeRatio = ratio;
      executeResize(ratio);
    }
  }

  /**
   * Get the components current ratio.
   *
   * @return Current ratio.
   */
  public final double getCurrentRatio() {
    return (double) component.getWidth() / (double) component.getHeight();
  }

  /**
   * Callback function that needs to be overridden with actual implementations.
   *
   * @param newratio New ratio to apply.
   */
  public abstract void executeResize(double newratio);

  /**
   * Get the components last applied ratio.
   *
   * @return Last applied ratio.
   */
  public double getActiveRatio() {
    return activeRatio;
  }
}
| {
"pile_set_name": "Github"
} |
// -*- C++ -*-
/**
* @file Method_Request_Lookup.h
*
* @author Pradeep Gore <[email protected]>
*/
#ifndef TAO_Notify_LOOKUP_METHOD_REQUEST_H
#define TAO_Notify_LOOKUP_METHOD_REQUEST_H
#include /**/ "ace/pre.h"
#include "orbsvcs/Notify/notify_serv_export.h"
#if !defined (ACE_LACKS_PRAGMA_ONCE)
# pragma once
#endif /* ACE_LACKS_PRAGMA_ONCE */
#include "orbsvcs/ESF/ESF_Worker.h"
#include "orbsvcs/Notify/Method_Request_Event.h"
#include "orbsvcs/Notify/ProxySupplier.h"
#include "orbsvcs/Notify/ProxyConsumer.h"
#include "orbsvcs/Notify/Consumer_Map.h"
#include "orbsvcs/Notify/Delivery_Request.h"
#include "orbsvcs/ESF/ESF_Worker.h"
TAO_BEGIN_VERSIONED_NAMESPACE_DECL
class TAO_Notify_Method_Request_Lookup_Queueable;
class TAO_Notify_Event;
/**
 * @class TAO_Notify_Method_Request_Lookup
 *
 * @brief Base class for the "lookup" command object: given an event that
 * arrived at a proxy consumer, visits each candidate proxy supplier
 * (via the TAO_ESF_Worker callback) to deliver the event.
 */
class TAO_Notify_Serv_Export TAO_Notify_Method_Request_Lookup
  : public TAO_ESF_Worker<TAO_Notify_ProxySupplier>
  , public TAO_Notify_Method_Request_Event
{
public:
  /// an arbitrary code (Octet) to identify this type of request in persistent storage
  enum {persistence_code = 2};

  /// Destructor
  virtual ~TAO_Notify_Method_Request_Lookup ();

  /// Static method used to reconstruct a Method Request Dispatch
  /// from its marshaled (CDR) form in persistent storage.
  static TAO_Notify_Method_Request_Lookup_Queueable * unmarshal (
    TAO_Notify::Delivery_Request_Ptr & delivery_request,
    TAO_Notify_EventChannelFactory &ecf,
    TAO_InputCDR & cdr);

protected:
  /// Constructor taking the event directly.
  TAO_Notify_Method_Request_Lookup (const TAO_Notify_Event * event, TAO_Notify_ProxyConsumer * proxy);

  /// Constructor taking a delivery request wrapping the event.
  TAO_Notify_Method_Request_Lookup (const TAO_Notify::Delivery_Request_Ptr& delivery, TAO_Notify_ProxyConsumer * proxy);

  /// Execute the dispatch operation.
  int execute_i (void);

  ///= TAO_ESF_Worker method: called once per candidate proxy supplier.
  virtual void work (TAO_Notify_ProxySupplier* proxy_supplier);

protected:
  /// The Proxy (consumer side) at which the event arrived.
  TAO_Notify_ProxyConsumer* proxy_consumer_;
};
/***************************************************************/
/**
 * @class TAO_Notify_Method_Request_Lookup_Queueable
 *
 * @brief Lookup command object that looks up the event type of the given event
 * in the consumer map and sends the event to each proxy supplier. This variant
 * holds its own reference to the event and a guard on the proxy consumer, so it
 * can be queued and executed later.
 */
class TAO_Notify_Serv_Export TAO_Notify_Method_Request_Lookup_Queueable
  : public TAO_Notify_Method_Request_Lookup
  , public TAO_Notify_Method_Request_Queueable
{
public:
  /// Constructor from event
  TAO_Notify_Method_Request_Lookup_Queueable (
    const TAO_Notify_Event::Ptr& event,
    TAO_Notify_ProxyConsumer * proxy_consumer);

  /// Constructor from delivery request
  TAO_Notify_Method_Request_Lookup_Queueable (
    TAO_Notify::Delivery_Request_Ptr & request,
    TAO_Notify_ProxyConsumer * proxy_consumer);

  /// Destructor
  virtual ~TAO_Notify_Method_Request_Lookup_Queueable ();

  /// Execute the Request
  virtual int execute (void);

private:
  /// Reference-counted handle keeping the event alive while queued.
  TAO_Notify_Event::Ptr event_var_;
  /// Guard keeping the proxy consumer alive while queued.
  TAO_Notify_ProxyConsumer::Ptr proxy_guard_;
};
/*****************************************************************************************************************************/
/**
 * @class TAO_Notify_Method_Request_Lookup_No_Copy
 *
 * @brief Lookup command object that looks up the event type of the given event
 * in the consumer map and sends the event to each proxy supplier. This variant
 * borrows the event without copying; copy() produces a queueable version for
 * deferred execution.
 */
class TAO_Notify_Serv_Export TAO_Notify_Method_Request_Lookup_No_Copy
  : public TAO_Notify_Method_Request_Lookup
  , public TAO_Notify_Method_Request
{
public:
  /// Constructor
  TAO_Notify_Method_Request_Lookup_No_Copy (
    const TAO_Notify_Event* event,
    TAO_Notify_ProxyConsumer* proxy_consumer);

  /// Destructor
  virtual ~TAO_Notify_Method_Request_Lookup_No_Copy ();

  /// Execute the Request
  virtual int execute (void);

  /// Create a copy of this object (as a queueable request owning the event).
  virtual TAO_Notify_Method_Request_Queueable* copy (void);
};
TAO_END_VERSIONED_NAMESPACE_DECL
#include /**/ "ace/post.h"
#endif /* TAO_Notify_LOOKUP_METHOD_REQUEST_H */
| {
"pile_set_name": "Github"
} |
{
"layout" : {
"options" : [
"directionLeadingToTrailing"
],
"formats" : [
"H:|-left-[avatar]-spaceH-[title]-right-|",
"H:|-left-[avatar]-spaceH-[subtitle]-right-|",
"V:|-top-[title]-spaceV-[subtitle]-(>=bottom)-|",
"V:|-top-[avatar]-(>=bottom)-|"
],
"metrics" : {
"left" : 20,
"right" : 20,
"spaceH" : 10,
"bottom" : 20,
"top" : 20,
"spaceV" : 10
}
},
"name" : "article",
"height" : 60.0,
"outlet" : "button",
"style" : {
"backgroundColor" : "#FFFFFF"
},
"bricks" : [
{
"targetClass" : "UILabel",
"style" : {
"attributedText" : [
{
"NSFont" : {
"size" : 16,
"name" : "Helvetica"
},
"NSColor" : "#FF0000"
},
{
"NSFont" : {
"size" : 20,
"name" : "Avenir-Book"
},
"NSColor" : "#000000"
},
{
"NSFont" : {
"size" : 16,
"name" : "Avenir-Book"
},
"NSColor" : "#AAAAAA"
}
],
"translatesAutoresizingMaskIntoConstraints" : false,
"numberOfLines" : 0
},
"name" : "title"
},
{
"targetClass" : "UILabel",
"style" : {
"numberOfLines" : 0,
"textColor" : "#AAAAAA",
"text" : "Default text",
"font" : {
"size" : 12,
"name" : ".SFUIText"
}
},
"name" : "subtitle"
},
{
"name" : "avatar",
"style" : {
"backgroundColor" : "#00FF00",
"ratio" : 1.5
},
"width" : 68.0,
"targetClass" : "UIImageView"
}
],
"targetClass" : "UIView"
}
| {
"pile_set_name": "Github"
} |
DEAL:0::2d OK
DEAL:0::3d OK
| {
"pile_set_name": "Github"
} |
package datadog.trace.agent.tooling.context;
import static datadog.trace.agent.tooling.ClassLoaderMatcher.BOOTSTRAP_CLASSLOADER;
import static datadog.trace.agent.tooling.bytebuddy.matcher.DDElementMatchers.safeHasSuperType;
import static net.bytebuddy.matcher.ElementMatchers.named;
import datadog.trace.agent.tooling.HelperInjector;
import datadog.trace.agent.tooling.Instrumenter;
import datadog.trace.agent.tooling.Instrumenter.Default;
import datadog.trace.agent.tooling.Utils;
import datadog.trace.api.Config;
import datadog.trace.bootstrap.ContextStore;
import datadog.trace.bootstrap.FieldBackedContextStoreAppliedMarker;
import datadog.trace.bootstrap.InstrumentationContext;
import datadog.trace.bootstrap.WeakMap;
import java.lang.reflect.Method;
import java.security.ProtectionDomain;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import lombok.extern.slf4j.Slf4j;
import net.bytebuddy.ByteBuddy;
import net.bytebuddy.ClassFileVersion;
import net.bytebuddy.agent.builder.AgentBuilder;
import net.bytebuddy.asm.AsmVisitorWrapper;
import net.bytebuddy.description.field.FieldDescription;
import net.bytebuddy.description.field.FieldList;
import net.bytebuddy.description.method.MethodList;
import net.bytebuddy.description.modifier.TypeManifestation;
import net.bytebuddy.description.modifier.Visibility;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.dynamic.DynamicType;
import net.bytebuddy.implementation.Implementation;
import net.bytebuddy.jar.asm.ClassVisitor;
import net.bytebuddy.jar.asm.ClassWriter;
import net.bytebuddy.jar.asm.FieldVisitor;
import net.bytebuddy.jar.asm.Label;
import net.bytebuddy.jar.asm.MethodVisitor;
import net.bytebuddy.jar.asm.Opcodes;
import net.bytebuddy.jar.asm.Type;
import net.bytebuddy.pool.TypePool;
import net.bytebuddy.utility.JavaModule;
/**
* InstrumentationContextProvider which stores context in a field that is injected into a class and
* falls back to global map if field was not injected.
*
* <p>This is accomplished by
*
* <ol>
* <li>Injecting a Dynamic Interface that provides getter and setter for context field
* <li>Applying Dynamic Interface to a type needing context, implementing interface methods and
* adding context storage field
* <li>Injecting a Dynamic Class created from {@link ContextStoreImplementationTemplate} to use
* injected field or fall back to a static map
* <li>Rewritting calls to the context-store to access the specific dynamic {@link
* ContextStoreImplementationTemplate}
* </ol>
*
* <p>Example:<br>
* <em>InstrumentationContext.get(Runnable.class, RunnableState.class)")</em><br>
* is rewritten to:<br>
* <em>FieldBackedProvider$ContextStore$Runnable$RunnableState12345.getContextStore(runnableRunnable.class,
* RunnableState.class)</em>
*/
@Slf4j
public class FieldBackedProvider implements InstrumentationContextProvider {
/**
* Note: the value here has to be inside on of the prefixes in
* datadog.trace.agent.tooling.Constants#BOOTSTRAP_PACKAGE_PREFIXES. This ensures that 'isolating'
* (or 'module') classloaders like jboss and osgi see injected classes. This works because we
* instrument those classloaders to load everything inside bootstrap packages.
*/
private static final String DYNAMIC_CLASSES_PACKAGE =
"datadog.trace.bootstrap.instrumentation.context.";
private static final String INJECTED_FIELDS_MARKER_CLASS_NAME =
Utils.getInternalName(FieldBackedContextStoreAppliedMarker.class.getName());
private static final Method CONTEXT_GET_METHOD;
private static final Method GET_CONTEXT_STORE_METHOD;
static {
    try {
      // Resolve once, at class load: the InstrumentationContext.get(keyClass, contextClass)
      // method whose call sites are rewritten, and the generated store template's static
      // getContextStore(keyClass, contextClass) method those calls are redirected to.
      CONTEXT_GET_METHOD = InstrumentationContext.class.getMethod("get", Class.class, Class.class);
      GET_CONTEXT_STORE_METHOD =
          ContextStoreImplementationTemplate.class.getMethod(
              "getContextStore", Class.class, Class.class);
    } catch (final Exception e) {
      // Fail hard: without these handles the context-store rewriting cannot function.
      throw new IllegalStateException(e);
    }
  }
private final Instrumenter.Default instrumenter;
private final ByteBuddy byteBuddy;
private final Map<String, String> contextStore;
/** fields-accessor-interface-name -> fields-accessor-interface-dynamic-type */
private final Map<String, DynamicType.Unloaded<?>> fieldAccessorInterfaces;
private final AgentBuilder.Transformer fieldAccessorInterfacesInjector;
/** context-store-type-name -> context-store-type-name-dynamic-type */
private final Map<String, DynamicType.Unloaded<?>> contextStoreImplementations;
private final AgentBuilder.Transformer contextStoreImplementationsInjector;
private final boolean fieldInjectionEnabled;
/**
   * @param instrumenter the instrumentation that declared the context store(s)
   * @param contextStore key-class name to context-class name pairs to back with injected fields
   */
  public FieldBackedProvider(
      final Instrumenter.Default instrumenter, Map<String, String> contextStore) {
    this.instrumenter = instrumenter;
    this.contextStore = contextStore;
    byteBuddy = new ByteBuddy();
    // Eagerly generate the dynamic accessor interfaces and context-store implementation
    // classes, plus the transformers that inject them into the bootstrap classloader.
    // Note: interfaces must be generated before the store implementations' injector is built.
    fieldAccessorInterfaces = generateFieldAccessorInterfaces();
    fieldAccessorInterfacesInjector = bootstrapHelperInjector(fieldAccessorInterfaces.values());
    contextStoreImplementations = generateContextStoreImplementationClasses();
    contextStoreImplementationsInjector =
        bootstrapHelperInjector(contextStoreImplementations.values());
    // Config flag gating whether fields are injected at all (vs. map-only fallback).
    fieldInjectionEnabled = Config.get().isRuntimeContextFieldInjection();
  }
/**
   * Add the transformer that rewrites InstrumentationContext.get(...) reads in instrumented
   * advice to target the generated context-store class, and make sure the generated helper
   * classes are available on the bootstrap classloader. No-op when no store is declared.
   */
  @Override
  public AgentBuilder.Identified.Extendable instrumentationTransformer(
      AgentBuilder.Identified.Extendable builder) {
    if (contextStore.isEmpty()) {
      return builder;
    }
    /*
     * Install transformer that rewrites accesses to context store with specialized bytecode that
     * invokes appropriate storage implementation.
     */
    builder = builder.transform(getTransformerForASMVisitor(getContextStoreReadsRewritingVisitor()));
    return injectHelpersIntoBootstrapClassloader(builder);
  }
/**
   * Build the ASM visitor that finds call sites of the form
   * InstrumentationContext.get(K.class, V.class) and rewrites them into static calls on the
   * dynamically generated, bootstrap-injected context-store class for that (K, V) pair.
   */
  private AsmVisitorWrapper getContextStoreReadsRewritingVisitor() {
    return new AsmVisitorWrapper() {
      @Override
      public int mergeWriter(final int flags) {
        // Rewritten call sites may change operand-stack requirements; let ASM recompute maxs.
        return flags | ClassWriter.COMPUTE_MAXS;
      }

      @Override
      public int mergeReader(final int flags) {
        return flags;
      }

      @Override
      public ClassVisitor wrap(
          final TypeDescription instrumentedType,
          final ClassVisitor classVisitor,
          final Implementation.Context implementationContext,
          final TypePool typePool,
          final FieldList<FieldDescription.InDefinedShape> fields,
          final MethodList<?> methods,
          final int writerFlags,
          final int readerFlags) {
        return new ClassVisitor(Opcodes.ASM7, classVisitor) {
          @Override
          public void visit(
              final int version,
              final int access,
              final String name,
              final String signature,
              final String superName,
              final String[] interfaces) {
            super.visit(version, access, name, signature, superName, interfaces);
          }

          @Override
          public MethodVisitor visitMethod(
              final int access,
              final String name,
              final String descriptor,
              final String signature,
              final String[] exceptions) {
            final MethodVisitor mv =
                super.visitMethod(access, name, descriptor, signature, exceptions);
            return new MethodVisitor(Opcodes.ASM7, mv) {
              /** The most recent objects pushed to the stack. */
              private final Object[] stack = {null, null};
              /** Most recent instructions. */
              private final int[] insnStack = {-1, -1, -1};

              @Override
              public void visitMethodInsn(
                  final int opcode,
                  final String owner,
                  final String name,
                  final String descriptor,
                  final boolean isInterface) {
                pushOpcode(opcode);
                if (Utils.getInternalName(CONTEXT_GET_METHOD.getDeclaringClass().getName())
                        .equals(owner)
                    && CONTEXT_GET_METHOD.getName().equals(name)
                    && Type.getMethodDescriptor(CONTEXT_GET_METHOD).equals(descriptor)) {
                  log.debug("Found context-store access in {}", instrumenter.getClass().getName());
                  /*
                  The idea here is that the rest of this method visitor collects the last three
                  instructions in the `insnStack` variable. Once we get here we check if those last
                  three instructions constitute a call that looks like
                  `InstrumentationContext.get(K.class, V.class)`. If it does, the inside of this if
                  rewrites it to call the dynamically injected context store implementation instead.
                  */
                  if ((insnStack[0] == Opcodes.INVOKESTATIC
                          && insnStack[1] == Opcodes.LDC
                          && insnStack[2] == Opcodes.LDC)
                      && (stack[0] instanceof Type && stack[1] instanceof Type)) {
                    // LDC of a class literal pushes a Type constant: stack[1] is the key class
                    // (pushed first), stack[0] the context class (pushed last).
                    final String contextClassName = ((Type) stack[0]).getClassName();
                    final String keyClassName = ((Type) stack[1]).getClassName();
                    final TypeDescription contextStoreImplementationClass =
                        getContextStoreImplementation(keyClassName, contextClassName);
                    if (log.isDebugEnabled()) {
                      log.debug(
                          "Rewriting context-store map fetch for instrumenter {}: {} -> {}",
                          instrumenter.getClass().getName(),
                          keyClassName,
                          contextClassName);
                    }
                    if (contextStoreImplementationClass == null) {
                      throw new IllegalStateException(
                          String.format(
                              "Incorrect Context Api Usage detected. Cannot find map holder class for %s context %s. Was that class defined in contextStore for instrumentation %s?",
                              keyClassName, contextClassName, instrumenter.getClass().getName()));
                    }
                    if (!contextClassName.equals(contextStore.get(keyClassName))) {
                      throw new IllegalStateException(
                          String.format(
                              "Incorrect Context Api Usage detected. Incorrect context class %s, expected %s for instrumentation %s",
                              contextClassName,
                              contextStore.get(keyClassName),
                              instrumenter.getClass().getName()));
                    }
                    // stack: contextClass | keyClass
                    // Emit the replacement call; the two class constants already on the stack
                    // become its arguments, so no stack manipulation is needed.
                    mv.visitMethodInsn(
                        Opcodes.INVOKESTATIC,
                        contextStoreImplementationClass.getInternalName(),
                        GET_CONTEXT_STORE_METHOD.getName(),
                        Type.getMethodDescriptor(GET_CONTEXT_STORE_METHOD),
                        false);
                    return;
                  }
                  throw new IllegalStateException(
                      "Incorrect Context Api Usage detected. Key and context class must be class-literals. Example of correct usage: InstrumentationContext.get(Runnable.class, RunnableContext.class)");
                } else {
                  super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
                }
              }

              /** Tracking the most recently used opcodes to assert proper api usage. */
              private void pushOpcode(final int opcode) {
                System.arraycopy(insnStack, 0, insnStack, 1, insnStack.length - 1);
                insnStack[0] = opcode;
              }

              /**
               * Tracking the most recently pushed objects on the stack to assert proper api usage.
               */
              private void pushStack(final Object o) {
                System.arraycopy(stack, 0, stack, 1, stack.length - 1);
                stack[0] = o;
              }

              // The overrides below only record opcodes/operands so the pattern match above
              // sees an accurate picture of the last few instructions.
              @Override
              public void visitInsn(final int opcode) {
                pushOpcode(opcode);
                super.visitInsn(opcode);
              }

              @Override
              public void visitJumpInsn(final int opcode, final Label label) {
                pushOpcode(opcode);
                super.visitJumpInsn(opcode, label);
              }

              @Override
              public void visitIntInsn(final int opcode, final int operand) {
                pushOpcode(opcode);
                super.visitIntInsn(opcode, operand);
              }

              @Override
              public void visitVarInsn(final int opcode, final int var) {
                pushOpcode(opcode);
                pushStack(var);
                super.visitVarInsn(opcode, var);
              }

              @Override
              public void visitLdcInsn(final Object value) {
                pushOpcode(Opcodes.LDC);
                pushStack(value);
                super.visitLdcInsn(value);
              }
            };
          }
        };
      }
    };
  }
/**
   * Chain the two helper injectors onto the builder.
   *
   * <p>Field accessor interfaces go to the bootstrap classloader because the generated context
   * store implementations depend on them (at the cost of losing stored-type checking, since the
   * actual classes may not be available there). Store implementations also go to the bootstrap
   * classloader so instrumentations sharing a store use the same static fallback map.
   */
  private AgentBuilder.Identified.Extendable injectHelpersIntoBootstrapClassloader(
      AgentBuilder.Identified.Extendable builder) {
    return builder
        .transform(fieldAccessorInterfacesInjector)
        .transform(contextStoreImplementationsInjector);
  }
/**
   * Get transformer that forces helper injection onto bootstrap classloader.
   *
   * @param helpers dynamically generated classes to inject
   * @return transformer that injects the helpers regardless of the classloader being transformed
   */
  private AgentBuilder.Transformer bootstrapHelperInjector(
      final Collection<DynamicType.Unloaded<?>> helpers) {
    // TODO: Better to pass through the context of the Instrumenter
    return new AgentBuilder.Transformer() {
      final HelperInjector injector =
          HelperInjector.forDynamicTypes(getClass().getSimpleName(), helpers);

      @Override
      public DynamicType.Builder<?> transform(
          final DynamicType.Builder<?> builder,
          final TypeDescription typeDescription,
          final ClassLoader classLoader,
          final JavaModule module) {
        // The classLoader argument is deliberately ignored: helpers must always land on
        // the bootstrap classloader so every generated store class can resolve them.
        return injector.transform(
            builder,
            typeDescription,
            // context store implementation classes will always go to the bootstrap
            BOOTSTRAP_CLASSLOADER,
            module);
      }
    };
  }
/*
Set of pairs (context holder, context class) for which we have matchers installed.
We use this to make sure we do not install matchers repeatedly for cases when same
context class is used by multiple instrumentations.
*/
private static final Set<Map.Entry<String, String>> INSTALLED_CONTEXT_MATCHERS = new HashSet<>();
/**
   * Clear set that prevents multiple matchers for same context class, so matchers can be
   * installed again after the agent builders are rebuilt (e.g. in tests).
   */
  public static void resetContextMatchers() {
    synchronized (INSTALLED_CONTEXT_MATCHERS) {
      INSTALLED_CONTEXT_MATCHERS.clear();
    }
  }
/**
   * For every declared context store, install (at most once per (key, context) pair, globally)
   * the matcher/transformer that injects the context field and accessor interface into matching
   * key classes. No-op when runtime field injection is disabled by config.
   */
  @Override
  public AgentBuilder.Identified.Extendable additionalInstrumentation(
      AgentBuilder.Identified.Extendable builder) {
    if (fieldInjectionEnabled) {
      for (final Map.Entry<String, String> entry : contextStore.entrySet()) {
        /*
         * For each context store defined in a current instrumentation we create an agent builder
         * that injects necessary fields.
         * Note: this synchronization should not have any impact on performance
         * since this is done when agent builder is being made, it doesn't affect actual
         * class transformation.
         */
        synchronized (INSTALLED_CONTEXT_MATCHERS) {
          // FIXME: This makes an assumption that class loader matchers for instrumenters that use
          // same context classes should be the same - which seems reasonable, but is not checked.
          // Addressing this properly requires some notion of 'compound intrumenters' which we
          // currently do not have.
          if (INSTALLED_CONTEXT_MATCHERS.contains(entry)) {
            log.debug("Skipping builder for {} {}", instrumenter.getClass().getName(), entry);
            continue;
          }

          log.debug("Making builder for {} {}", instrumenter.getClass().getName(), entry);
          INSTALLED_CONTEXT_MATCHERS.add(entry);

          /*
           * Match the key class and all its subtypes, but only where it is safe to add fields
           * (freshly loading, or previously transformed by us — see safeToInjectFieldsMatcher).
           */
          builder =
              builder
                  .type(safeHasSuperType(named(entry.getKey())), instrumenter.classLoaderMatcher())
                  .and(safeToInjectFieldsMatcher())
                  .and(Default.NOT_DECORATOR_MATCHER)
                  .transform(NoOpTransformer.INSTANCE);

          /*
           * We inject helpers here as well as when instrumentation is applied to ensure that
           * helpers are present even if instrumented classes are not loaded, but classes with state
           * fields added are loaded (e.g. sun.net.www.protocol.https.HttpsURLConnectionImpl).
           */
          builder = injectHelpersIntoBootstrapClassloader(builder);

          builder =
              builder.transform(
                  getTransformerForASMVisitor(
                      getFieldInjectionVisitor(entry.getKey(), entry.getValue())));
        }
      }
    }
    return builder;
  }
  /**
   * Matcher that decides whether it is safe to (re-)inject context fields into the class at its
   * current point in the load/retransform lifecycle.
   */
  private static AgentBuilder.RawMatcher safeToInjectFieldsMatcher() {
    return new AgentBuilder.RawMatcher() {
      @Override
      public boolean matches(
          final TypeDescription typeDescription,
          final ClassLoader classLoader,
          final JavaModule module,
          final Class<?> classBeingRedefined,
          final ProtectionDomain protectionDomain) {
        /*
         * The idea here is that we can add fields if class is just being loaded
         * (classBeingRedefined == null) and we have to add same fields again if class we added
         * fields before is being transformed again. Note: here we assume that Class#getInterfaces()
         * returns list of interfaces defined immediately on a given class, not inherited from its
         * parents. It looks like current JVM implementation does exactly this but javadoc is not
         * explicit about that.
         */
        return classBeingRedefined == null
            || Arrays.asList(classBeingRedefined.getInterfaces())
                .contains(FieldBackedContextStoreAppliedMarker.class);
      }
    };
  }
  /**
   * Returns a visitor that injects the context field, its getter/setter accessors, and the marker
   * interfaces into a matched key class.
   *
   * @param keyClassName key class name
   * @param contextClassName context class name
   * @return visitor performing the field and accessor injection
   */
  private AsmVisitorWrapper getFieldInjectionVisitor(
      final String keyClassName, final String contextClassName) {
    return new AsmVisitorWrapper() {
      @Override
      public int mergeWriter(final int flags) {
        // COMPUTE_MAXS makes ASM calculate stack/local sizes, so the visitMaxs(0, 0) calls in
        // the generated accessors below are placeholders.
        return flags | ClassWriter.COMPUTE_MAXS;
      }
      @Override
      public int mergeReader(final int flags) {
        return flags;
      }
      @Override
      public ClassVisitor wrap(
          final TypeDescription instrumentedType,
          final ClassVisitor classVisitor,
          final Implementation.Context implementationContext,
          final TypePool typePool,
          final FieldList<FieldDescription.InDefinedShape> fields,
          final MethodList<?> methods,
          final int writerFlags,
          final int readerFlags) {
        return new ClassVisitor(Opcodes.ASM7, classVisitor) {
          // We are using Object class name instead of contextClassName here because this gets
          // injected onto Bootstrap classloader where context class may be unavailable
          private final TypeDescription contextType =
              new TypeDescription.ForLoadedType(Object.class);
          private final String fieldName = getContextFieldName(keyClassName);
          private final String getterMethodName = getContextGetterName(keyClassName);
          private final String setterMethodName = getContextSetterName(keyClassName);
          private final TypeDescription interfaceType =
              getFieldAccessorInterface(keyClassName, contextClassName);
          // Track what the class already declares so re-transformation does not duplicate members.
          private boolean foundField = false;
          private boolean foundGetter = false;
          private boolean foundSetter = false;
          @Override
          public void visit(
              final int version,
              final int access,
              final String name,
              final String signature,
              final String superName,
              String[] interfaces) {
            if (interfaces == null) {
              interfaces = new String[] {};
            }
            // Add the marker interface and the accessor interface to the class declaration;
            // LinkedHashSet preserves order and avoids duplicates on re-transformation.
            final Set<String> set = new LinkedHashSet<>(Arrays.asList(interfaces));
            set.add(INJECTED_FIELDS_MARKER_CLASS_NAME);
            set.add(interfaceType.getInternalName());
            super.visit(version, access, name, signature, superName, set.toArray(new String[] {}));
          }
          @Override
          public FieldVisitor visitField(
              final int access,
              final String name,
              final String descriptor,
              final String signature,
              final Object value) {
            if (name.equals(fieldName)) {
              foundField = true;
            }
            return super.visitField(access, name, descriptor, signature, value);
          }
          @Override
          public MethodVisitor visitMethod(
              final int access,
              final String name,
              final String descriptor,
              final String signature,
              final String[] exceptions) {
            if (name.equals(getterMethodName)) {
              foundGetter = true;
            }
            if (name.equals(setterMethodName)) {
              foundSetter = true;
            }
            return super.visitMethod(access, name, descriptor, signature, exceptions);
          }
          @Override
          public void visitEnd() {
            // Checking only for field existence is not enough as libraries like CGLIB only copy
            // public/protected methods and not fields (neither public nor private ones) when
            // they enhance a class.
            // For this reason we check separately for the field and for the two accessors.
            if (!foundField) {
              cv.visitField(
                  // Field should be transient to avoid being serialized with the object.
                  Opcodes.ACC_PRIVATE | Opcodes.ACC_TRANSIENT,
                  fieldName,
                  contextType.getDescriptor(),
                  null,
                  null);
            }
            if (!foundGetter) {
              addGetter();
            }
            if (!foundSetter) {
              addSetter();
            }
            super.visitEnd();
          }
          /** Just 'standard' getter implementation */
          private void addGetter() {
            final MethodVisitor mv = getAccessorMethodVisitor(getterMethodName);
            mv.visitCode();
            mv.visitVarInsn(Opcodes.ALOAD, 0);
            mv.visitFieldInsn(
                Opcodes.GETFIELD,
                instrumentedType.getInternalName(),
                fieldName,
                contextType.getDescriptor());
            mv.visitInsn(Opcodes.ARETURN);
            mv.visitMaxs(0, 0);
            mv.visitEnd();
          }
          /** Just 'standard' setter implementation */
          private void addSetter() {
            final MethodVisitor mv = getAccessorMethodVisitor(setterMethodName);
            mv.visitCode();
            mv.visitVarInsn(Opcodes.ALOAD, 0);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitFieldInsn(
                Opcodes.PUTFIELD,
                instrumentedType.getInternalName(),
                fieldName,
                contextType.getDescriptor());
            mv.visitInsn(Opcodes.RETURN);
            mv.visitMaxs(0, 0);
            mv.visitEnd();
          }
          /** Opens a public method with the descriptor taken from the accessor interface. */
          private MethodVisitor getAccessorMethodVisitor(final String methodName) {
            return cv.visitMethod(
                Opcodes.ACC_PUBLIC,
                methodName,
                Utils.getMethodDefinition(interfaceType, methodName).getDescriptor(),
                null,
                null);
          }
        };
      }
    };
  }
private TypeDescription getContextStoreImplementation(
final String keyClassName, final String contextClassName) {
final DynamicType.Unloaded<?> type =
contextStoreImplementations.get(
getContextStoreImplementationClassName(keyClassName, contextClassName));
if (type == null) {
return null;
} else {
return type.getTypeDescription();
}
}
private Map<String, DynamicType.Unloaded<?>> generateContextStoreImplementationClasses() {
final Map<String, DynamicType.Unloaded<?>> contextStoreImplementations =
new HashMap<>(contextStore.size());
for (final Map.Entry<String, String> entry : contextStore.entrySet()) {
final DynamicType.Unloaded<?> type =
makeContextStoreImplementationClass(entry.getKey(), entry.getValue());
contextStoreImplementations.put(type.getTypeDescription().getName(), type);
}
return Collections.unmodifiableMap(contextStoreImplementations);
}
  /**
   * Generate an 'implementation' of a context store class for given key class name and context
   * class name
   *
   * @param keyClassName key class name
   * @param contextClassName context class name
   * @return unloaded dynamic type containing generated class
   */
  private DynamicType.Unloaded<?> makeContextStoreImplementationClass(
      final String keyClassName, final String contextClassName) {
    // Rebase the template so its realGet/realPut/realSynchronizeInstance stubs can be replaced
    // by the visitor-generated bodies.
    return byteBuddy
        .rebase(ContextStoreImplementationTemplate.class)
        .modifiers(Visibility.PUBLIC, TypeManifestation.FINAL)
        .name(getContextStoreImplementationClassName(keyClassName, contextClassName))
        .visit(getContextStoreImplementationVisitor(keyClassName, contextClassName))
        .make();
  }
  /**
   * Returns a visitor that 'fills in' missing methods into concrete implementation of
   * ContextStoreImplementationTemplate for given key class name and context class name
   *
   * @param keyClassName key class name
   * @param contextClassName context class name
   * @return visitor that adds implementation for methods that need to be generated
   */
  private AsmVisitorWrapper getContextStoreImplementationVisitor(
      final String keyClassName, final String contextClassName) {
    return new AsmVisitorWrapper() {
      @Override
      public int mergeWriter(final int flags) {
        // COMPUTE_MAXS makes ASM calculate stack/local sizes; visitMaxs(0, 0) below are
        // placeholders.
        return flags | ClassWriter.COMPUTE_MAXS;
      }
      @Override
      public int mergeReader(final int flags) {
        return flags;
      }
      @Override
      public ClassVisitor wrap(
          final TypeDescription instrumentedType,
          final ClassVisitor classVisitor,
          final Implementation.Context implementationContext,
          final TypePool typePool,
          final FieldList<FieldDescription.InDefinedShape> fields,
          final MethodList<?> methods,
          final int writerFlags,
          final int readerFlags) {
        return new ClassVisitor(Opcodes.ASM7, classVisitor) {
          private final TypeDescription accessorInterface =
              getFieldAccessorInterface(keyClassName, contextClassName);
          private final String accessorInterfaceInternalName = accessorInterface.getInternalName();
          private final String instrumentedTypeInternalName = instrumentedType.getInternalName();
          // Stack-map frames are only emitted for class file versions that require them (Java 6+).
          private final boolean frames =
              implementationContext.getClassFileVersion().isAtLeast(ClassFileVersion.JAVA_V6);
          @Override
          public MethodVisitor visitMethod(
              final int access,
              final String name,
              final String descriptor,
              final String signature,
              final String[] exceptions) {
            // Replace the three template stubs with generated bodies; returning null drops the
            // original stub body from the rebased class.
            if ("realGet".equals(name)) {
              generateRealGetMethod(name);
              return null;
            } else if ("realPut".equals(name)) {
              generateRealPutMethod(name);
              return null;
            } else if ("realSynchronizeInstance".equals(name)) {
              generateRealSynchronizeInstanceMethod(name);
              return null;
            } else {
              return super.visitMethod(access, name, descriptor, signature, exceptions);
            }
          }
          /**
           * Provides implementation for {@code realGet} method that looks like this
           *
           * <blockquote>
           *
           * <pre>
           * private Object realGet(final Object key) {
           *   if (key instanceof $accessorInterfaceInternalName) {
           *     return (($accessorInterfaceInternalName) key).$getterName();
           *   } else {
           *     return mapGet(key);
           *   }
           * }
           * </pre>
           *
           * </blockquote>
           *
           * @param name name of the method being visited
           */
          private void generateRealGetMethod(final String name) {
            final String getterName = getContextGetterName(keyClassName);
            final Label elseLabel = new Label();
            final MethodVisitor mv = getMethodVisitor(name);
            mv.visitCode();
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitTypeInsn(Opcodes.INSTANCEOF, accessorInterfaceInternalName);
            mv.visitJumpInsn(Opcodes.IFEQ, elseLabel);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitTypeInsn(Opcodes.CHECKCAST, accessorInterfaceInternalName);
            mv.visitMethodInsn(
                Opcodes.INVOKEINTERFACE,
                accessorInterfaceInternalName,
                getterName,
                Utils.getMethodDefinition(accessorInterface, getterName).getDescriptor(),
                true);
            mv.visitInsn(Opcodes.ARETURN);
            mv.visitLabel(elseLabel);
            if (frames) {
              mv.visitFrame(Opcodes.F_SAME, 0, null, 0, null);
            }
            mv.visitVarInsn(Opcodes.ALOAD, 0);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitMethodInsn(
                Opcodes.INVOKESPECIAL,
                instrumentedTypeInternalName,
                "mapGet",
                Utils.getMethodDefinition(instrumentedType, "mapGet").getDescriptor(),
                false);
            mv.visitInsn(Opcodes.ARETURN);
            mv.visitMaxs(0, 0);
            mv.visitEnd();
          }
          /**
           * Provides implementation for {@code realPut} method that looks like this
           *
           * <blockquote>
           *
           * <pre>
           * private void realPut(final Object key, final Object value) {
           *   if (key instanceof $accessorInterfaceInternalName) {
           *     (($accessorInterfaceInternalName) key).$setterName(value);
           *   } else {
           *     mapPut(key, value);
           *   }
           * }
           * </pre>
           *
           * </blockquote>
           *
           * @param name name of the method being visited
           */
          private void generateRealPutMethod(final String name) {
            final String setterName = getContextSetterName(keyClassName);
            final Label elseLabel = new Label();
            final Label endLabel = new Label();
            final MethodVisitor mv = getMethodVisitor(name);
            mv.visitCode();
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitTypeInsn(Opcodes.INSTANCEOF, accessorInterfaceInternalName);
            mv.visitJumpInsn(Opcodes.IFEQ, elseLabel);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitTypeInsn(Opcodes.CHECKCAST, accessorInterfaceInternalName);
            mv.visitVarInsn(Opcodes.ALOAD, 2);
            mv.visitMethodInsn(
                Opcodes.INVOKEINTERFACE,
                accessorInterfaceInternalName,
                setterName,
                Utils.getMethodDefinition(accessorInterface, setterName).getDescriptor(),
                true);
            mv.visitJumpInsn(Opcodes.GOTO, endLabel);
            mv.visitLabel(elseLabel);
            if (frames) {
              mv.visitFrame(Opcodes.F_SAME, 0, null, 0, null);
            }
            mv.visitVarInsn(Opcodes.ALOAD, 0);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitVarInsn(Opcodes.ALOAD, 2);
            mv.visitMethodInsn(
                Opcodes.INVOKESPECIAL,
                instrumentedTypeInternalName,
                "mapPut",
                Utils.getMethodDefinition(instrumentedType, "mapPut").getDescriptor(),
                false);
            mv.visitLabel(endLabel);
            if (frames) {
              mv.visitFrame(Opcodes.F_SAME, 0, null, 0, null);
            }
            mv.visitInsn(Opcodes.RETURN);
            mv.visitMaxs(0, 0);
            mv.visitEnd();
          }
          /**
           * Provides implementation for {@code realSynchronizeInstance} method that looks like this
           *
           * <blockquote>
           *
           * <pre>
           * private Object realSynchronizeInstance(final Object key) {
           *   if (key instanceof $accessorInterfaceInternalName) {
           *     return key;
           *   } else {
           *     return mapSynchronizeInstance(key);
           *   }
           * }
           * </pre>
           *
           * </blockquote>
           *
           * @param name name of the method being visited
           */
          private void generateRealSynchronizeInstanceMethod(final String name) {
            final MethodVisitor mv = getMethodVisitor(name);
            mv.visitCode();
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitTypeInsn(Opcodes.INSTANCEOF, accessorInterfaceInternalName);
            final Label elseLabel = new Label();
            mv.visitJumpInsn(Opcodes.IFEQ, elseLabel);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitInsn(Opcodes.ARETURN);
            mv.visitLabel(elseLabel);
            if (frames) {
              mv.visitFrame(Opcodes.F_SAME, 0, null, 0, null);
            }
            mv.visitVarInsn(Opcodes.ALOAD, 0);
            mv.visitVarInsn(Opcodes.ALOAD, 1);
            mv.visitMethodInsn(
                Opcodes.INVOKESPECIAL,
                instrumentedTypeInternalName,
                "mapSynchronizeInstance",
                Utils.getMethodDefinition(instrumentedType, "mapSynchronizeInstance")
                    .getDescriptor(),
                false);
            mv.visitInsn(Opcodes.ARETURN);
            mv.visitMaxs(0, 0);
            mv.visitEnd();
          }
          /** Opens a private method whose descriptor matches the template's stub of that name. */
          private MethodVisitor getMethodVisitor(final String methodName) {
            return cv.visitMethod(
                Opcodes.ACC_PRIVATE,
                methodName,
                Utils.getMethodDefinition(instrumentedType, methodName).getDescriptor(),
                null,
                null);
          }
        };
      }
    };
  }
  /**
   * Template class used to generate the class that accesses stored context using either key
   * instance's own injected field or global hash map if field is not available.
   */
  private static final class ContextStoreImplementationTemplate
      implements ContextStore<Object, Object> {
    private static final ContextStoreImplementationTemplate INSTANCE =
        new ContextStoreImplementationTemplate();
    // Lazily-created fallback map; volatile so the double-checked locking in getMap() is safe.
    private volatile WeakMap map;
    private final Object synchronizationInstance = new Object();
    // Double-checked locking; correct because 'map' is volatile.
    private WeakMap getMap() {
      if (null == map) {
        synchronized (synchronizationInstance) {
          if (null == map) {
            this.map = WeakMap.Provider.newWeakMap();
          }
        }
      }
      return map;
    }
    private ContextStoreImplementationTemplate() {}
    @Override
    public Object get(final Object key) {
      return realGet(key);
    }
    @Override
    public Object putIfAbsent(final Object key, final Object context) {
      // Fast path without locking, then re-check under the key-specific monitor.
      Object existingContext = realGet(key);
      if (null != existingContext) {
        return existingContext;
      }
      synchronized (realSynchronizeInstance(key)) {
        existingContext = realGet(key);
        if (null != existingContext) {
          return existingContext;
        }
        realPut(key, context);
        return context;
      }
    }
    @Override
    public Object putIfAbsent(final Object key, final Factory<Object> contextFactory) {
      // Same double-checked pattern as above; the factory is only invoked when no context exists.
      Object existingContext = realGet(key);
      if (null != existingContext) {
        return existingContext;
      }
      synchronized (realSynchronizeInstance(key)) {
        existingContext = realGet(key);
        if (null != existingContext) {
          return existingContext;
        }
        final Object context = contextFactory.create();
        realPut(key, context);
        return context;
      }
    }
    @Override
    public void put(final Object key, final Object context) {
      realPut(key, context);
    }
    // The three real* stubs below are replaced with generated bytecode by
    // getContextStoreImplementationVisitor when the template is rebased.
    private Object realGet(final Object key) {
      // to be generated
      return null;
    }
    private void realPut(final Object key, final Object value) {
      // to be generated
    }
    private Object realSynchronizeInstance(final Object key) {
      // to be generated
      return null;
    }
    private Object mapGet(final Object key) {
      return getMap().get(key);
    }
    private void mapPut(final Object key, final Object value) {
      getMap().put(key, value);
    }
    private Object mapSynchronizeInstance(final Object key) {
      return synchronizationInstance;
    }
    public static ContextStore getContextStore(final Class keyClass, final Class contextClass) {
      // We do not actually check the keyClass here - but that should be fine since compiler would
      // check things for us.
      return INSTANCE;
    }
  }
private TypeDescription getFieldAccessorInterface(
final String keyClassName, final String contextClassName) {
final DynamicType.Unloaded<?> type =
fieldAccessorInterfaces.get(
getContextAccessorInterfaceName(keyClassName, contextClassName));
if (type == null) {
return null;
} else {
return type.getTypeDescription();
}
}
private Map<String, DynamicType.Unloaded<?>> generateFieldAccessorInterfaces() {
final Map<String, DynamicType.Unloaded<?>> fieldAccessorInterfaces =
new HashMap<>(contextStore.size());
for (final Map.Entry<String, String> entry : contextStore.entrySet()) {
final DynamicType.Unloaded<?> type =
makeFieldAccessorInterface(entry.getKey(), entry.getValue());
fieldAccessorInterfaces.put(type.getTypeDescription().getName(), type);
}
return Collections.unmodifiableMap(fieldAccessorInterfaces);
}
  /**
   * Generate an interface that provides field accessor methods for given key class name and context
   * class name
   *
   * @param keyClassName key class name
   * @param contextClassName context class name
   * @return unloaded dynamic type containing generated interface
   */
  private DynamicType.Unloaded<?> makeFieldAccessorInterface(
      final String keyClassName, final String contextClassName) {
    // We are using Object class name instead of contextClassName here because this gets injected
    // onto Bootstrap classloader where context class may be unavailable
    final TypeDescription contextType = new TypeDescription.ForLoadedType(Object.class);
    return byteBuddy
        .makeInterface()
        .name(getContextAccessorInterfaceName(keyClassName, contextClassName))
        .defineMethod(getContextGetterName(keyClassName), contextType, Visibility.PUBLIC)
        .withoutCode()
        .defineMethod(getContextSetterName(keyClassName), TypeDescription.VOID, Visibility.PUBLIC)
        .withParameter(contextType, "value")
        .withoutCode()
        .make();
  }
  /** Adapts an ASM visitor wrapper into an agent-builder transformer that applies it. */
  private AgentBuilder.Transformer getTransformerForASMVisitor(final AsmVisitorWrapper visitor) {
    return new AgentBuilder.Transformer() {
      @Override
      public DynamicType.Builder<?> transform(
          final DynamicType.Builder<?> builder,
          final TypeDescription typeDescription,
          final ClassLoader classLoader,
          final JavaModule module) {
        return builder.visit(visitor);
      }
    };
  }
private String getContextStoreImplementationClassName(
final String keyClassName, final String contextClassName) {
return DYNAMIC_CLASSES_PACKAGE
+ getClass().getSimpleName()
+ "$ContextStore$"
+ Utils.converToInnerClassName(keyClassName)
+ "$"
+ Utils.converToInnerClassName(contextClassName);
}
private String getContextAccessorInterfaceName(
final String keyClassName, final String contextClassName) {
return DYNAMIC_CLASSES_PACKAGE
+ getClass().getSimpleName()
+ "$ContextAccessor$"
+ Utils.converToInnerClassName(keyClassName)
+ "$"
+ Utils.converToInnerClassName(contextClassName);
}
  /** Name of the injected context field for the given key class ({@code __datadogContext$...}). */
  private static String getContextFieldName(final String keyClassName) {
    return "__datadogContext$" + Utils.converToInnerClassName(keyClassName);
  }
  /** Getter name derived from the injected context field name. */
  private static String getContextGetterName(final String keyClassName) {
    return "get" + getContextFieldName(keyClassName);
  }
private static String getContextSetterName(final String key) {
return "set" + getContextFieldName(key);
}
  // Originally found in AgentBuilder.Transformer.NoOp, but removed in 1.10.7
  /** Transformer that returns the builder unchanged; used only to register type matchers. */
  enum NoOpTransformer implements AgentBuilder.Transformer {
    INSTANCE;
    @Override
    public DynamicType.Builder<?> transform(
        final DynamicType.Builder<?> builder,
        final TypeDescription typeDescription,
        final ClassLoader classLoader,
        final JavaModule module) {
      return builder;
    }
  }
}
| {
"pile_set_name": "Github"
} |
<vector xmlns:android="http://schemas.android.com/apk/res/android"
android:width="90dp"
android:height="90dp"
android:viewportWidth="90.0"
android:viewportHeight="90.0">
<path
android:pathData="M45,0.39C44.97,0.39 44.94,0.39 44.9,0.39L45,44.98L45,44.99L45,45L45,45L45.02,45L89.61,45.02C89.61,41.2 89.12,37.32 88.09,33.46C88.08,33.42 88.07,33.39 88.06,33.36L88.06,33.36C87.07,29.72 85.62,26.14 83.64,22.71C83.64,22.71 83.63,22.7 83.63,22.7C83.63,22.7 83.63,22.69 83.63,22.69C81.72,19.4 79.37,16.29 76.55,13.47C76.55,13.47 76.55,13.46 76.54,13.46L76.54,13.46C76.52,13.43 76.49,13.41 76.47,13.39C73.79,10.72 70.73,8.35 67.3,6.37C67.28,6.35 67.25,6.34 67.22,6.32L67.22,6.33C63.94,4.45 60.38,2.94 56.56,1.92C56.56,1.92 56.56,1.92 56.56,1.92C56.56,1.92 56.55,1.91 56.54,1.91L56.54,1.91C56.51,1.9 56.48,1.9 56.45,1.89L56.45,1.89C52.8,0.92 48.96,0.39 45,0.39L45,0.39zM45.41,44.29L45.01,44.98L45,44.99L45.41,44.29z"
android:strokeLineCap="butt"
android:fillAlpha="1"
android:strokeColor="#00000000"
android:fillColor="#8ab000"
android:strokeWidth="2"
android:strokeLineJoin="miter"
android:strokeAlpha="1"/>
<path
android:pathData="m45,7.73c-20.57,0 -37.27,16.7 -37.27,37.27 0,20.57 16.7,37.27 37.27,37.27 20.57,0 37.27,-16.7 37.27,-37.27 0,-20.57 -16.7,-37.27 -37.27,-37.27z"
android:strokeLineCap="butt"
android:fillAlpha="1"
android:strokeColor="#00000000"
android:fillColor="#0066cc"
android:strokeWidth="2"
android:strokeLineJoin="miter"
android:strokeAlpha="1"/>
<path
android:pathData="M30.78,56.85h27.97v4.04h-27.97z"
android:fillAlpha="1"
android:strokeColor="#00000000"
android:fillColor="#ffffff"
android:strokeWidth="2"
android:strokeAlpha="1"/>
<path
android:pathData="m38.8,26.93 l0,12.18 -7.71,0 13.86,13.57 13.86,-13.57 -7.83,0 0,-12.18z"
android:fillAlpha="1"
android:strokeColor="#00000000"
android:fillColor="#ffffff"
android:strokeWidth="2"
android:strokeAlpha="1"/>
</vector>
| {
"pile_set_name": "Github"
} |
// Registers an (empty) TypeScript snippet module with the ace module system.
ace.define("ace/snippets/typescript", ["require", "exports", "module"], function (require, exports, module) {
    "use strict";

    // No snippet text is shipped for TypeScript; only the scope is declared.
    exports.snippetText = undefined;
    exports.scope = "typescript";
});
| {
"pile_set_name": "Github"
} |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/kinesis/model/DescribeStreamConsumerResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::Kinesis::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
// Default-constructs an empty result; members are default-initialized.
DescribeStreamConsumerResult::DescribeStreamConsumerResult()
{
}
// Constructs the result from a service response by delegating to operator=.
DescribeStreamConsumerResult::DescribeStreamConsumerResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}
// Deserializes the JSON payload: copies "ConsumerDescription" into the member when present,
// leaving the member untouched otherwise.
DescribeStreamConsumerResult& DescribeStreamConsumerResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  JsonView jsonValue = result.GetPayload().View();
  if(jsonValue.ValueExists("ConsumerDescription"))
  {
    m_consumerDescription = jsonValue.GetObject("ConsumerDescription");
  }
  return *this;
}
| {
"pile_set_name": "Github"
} |
#ifndef SB_COARSEGRAIN_H
#define SB_COARSEGRAIN_H
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "stats.h"
#include "helper_functions.h"
/*
 * Coarse-grains the array y (length `size`) into `num_groups` discrete groups
 * according to the method named by `how`, writing one integer label per sample
 * into `labels`.
 * NOTE(review): the accepted `how` values and the exact labeling scheme are
 * defined in the implementation file -- confirm there before relying on them.
 */
extern void sb_coarsegrain(const double y[], const int size, const char how[], const int num_groups, int labels[]);
#endif
| {
"pile_set_name": "Github"
} |
using System;
using System.Threading.Tasks;
namespace CSharpFunctionalExtensions
{
public static partial class ResultExtensions
{
/// <summary>
/// This method should be used in linq queries. We recommend using Bind method.
/// </summary>
public static async Task<Result<TR>> SelectMany<T, TK, TR>(
this Task<Result<T>> resultTask,
Func<T, Task<Result<TK>>> func,
Func<T, TK, TR> project)
{
Result<T> result = await resultTask.DefaultAwait();
return await result.SelectMany(func, project).DefaultAwait();
}
/// <summary>
/// This method should be used in linq queries. We recommend using Bind method.
/// </summary>
public static async Task<Result<TR, TE>> SelectMany<T, TK, TE, TR>(
this Task<Result<T, TE>> resultTask,
Func<T, Task<Result<TK, TE>>> func,
Func<T, TK, TR> project)
{
Result<T, TE> result = await resultTask.DefaultAwait();
return await result.SelectMany(func, project).DefaultAwait();
}
}
} | {
"pile_set_name": "Github"
} |
// Success popup: fixed-size card centered slightly above the viewport middle.
.grab-box{
    width:480px;
    height:480px;
    border-radius: 20px;
    position: absolute;
    left:50%;
    top:50%;
    // -60% vertical offset lifts the card above the exact center.
    transform: translate(-50%, -60%);
    background: url(../images/grab-succ-bg.png) no-repeat rgba(255,255,255,.6);
    background-size: contain;
    // Popup headline.
    .grab-title{
        width:100%;
        height:82px;
        line-height:82px;
        text-align: center;
        font-weight: bold;
        font-size: 36px;
        color:#333333;
    }
    // Circular avatar/illustration, horizontally centered.
    .grab-img{
        position: relative;
        left:50%;
        transform: translate(-50%,0);
        width:206px;
        height:206px;
        border-radius: 50%;
        overflow: hidden;
        img{
            display: block;
            width:100%;
        }
    }
    // Two-column info row under the image.
    .grab-info{
        position: relative;
        left:50%;
        transform: translate(-50%,0);
        width:360px;
        margin-top:23px;
        display: flex;
        justify-content: space-between;
        .text{
            font-size: 26px;
            color:#333333;
        }
    }
    // Action buttons row (dismiss / confirm).
    .grab-btn-group{
        position: relative;
        left:50%;
        transform: translate(-50%,0);
        width:404px;
        margin-top:52px;
        display: flex;
        justify-content: space-between;
        .grab-btn{
            font-size: 36px;
        }
        .grab-btn-gray{
            color:#666666;
        }
        .grab-btn-yellow{
            color:#eba904;
        }
    }
} | {
"pile_set_name": "Github"
} |
<?php
/**
* Text_Template
*
* Copyright (c) 2009-2010, Sebastian Bergmann <[email protected]>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Sebastian Bergmann nor the names of his
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @category Text
* @package Template
* @author Sebastian Bergmann <[email protected]>
* @copyright 2009-2010 Sebastian Bergmann <[email protected]>
* @license http://www.opensource.org/licenses/BSD-3-Clause The BSD 3-Clause License
* @link http://github.com/sebastianbergmann/php-text-template
* @since File available since Release 1.1.0
*/
spl_autoload_register(
    // Class-map autoloader. ___CLASSLIST___ is a Text_Template placeholder that is
    // substituted with a 'lowercased class name' => 'relative file path' map when
    // this file is generated.
    function ($class) {
        static $classes = NULL;
        static $path = NULL;
        if ($classes === NULL) {
            $classes = array(
                ___CLASSLIST___
            );
            // Base directory: two levels above this generated file.
            $path = dirname(dirname(__FILE__));
        }
        // Lookup is case-insensitive because the map keys are stored lowercased.
        $cn = strtolower($class);
        if (isset($classes[$cn])) {
            require $path . $classes[$cn];
        }
    }
);
| {
"pile_set_name": "Github"
} |
/*
* This source file is part of libRocket, the HTML/CSS Interface Middleware
*
* For the latest information, see http://www.librocket.com
*
* Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include "precompiled.h"
#include "../../Include/Rocket/Core/Box.h"
namespace Rocket {
namespace Core {
// Initialises a zero-sized box.
Box::Box() : content(0, 0), offset(0, 0)
{
	memset(area_edges, 0, sizeof(area_edges));
}
// Initialises a box with a default content area and no padding, borders and margins.
Box::Box(const Vector2f& content) : content(content), offset(0, 0)
{
	memset(area_edges, 0, sizeof(area_edges));
}
Box::~Box()
{
}
// Returns the offset of this box. This will usually be (0, 0).
const Vector2f& Box::GetOffset() const
{
	return offset;
}
// Returns the top-left position of one of the areas.
// NOTE(review): the accumulation loops here and below rely on the Area enum ordering
// MARGIN < BORDER < PADDING < CONTENT declared in the header -- confirm if that changes.
Vector2f Box::GetPosition(Area area) const
{
	// Start from the outer (margin) corner, then walk inwards up to the requested area.
	Vector2f area_position(offset.x - area_edges[MARGIN][LEFT], offset.y - area_edges[MARGIN][TOP]);
	for (int i = 0; i < area; i++)
	{
		area_position.x += area_edges[i][LEFT];
		area_position.y += area_edges[i][TOP];
	}
	return area_position;
}
// Returns the size of one of the box's areas. This will include all inner areas.
Vector2f Box::GetSize(Area area) const
{
	// Start from the content size and add each surrounding area's edges outwards.
	Vector2f area_size(content);
	for (int i = PADDING; i >= area; i--)
	{
		area_size.x += (area_edges[i][LEFT] + area_edges[i][RIGHT]);
		area_size.y += (area_edges[i][TOP] + area_edges[i][BOTTOM]);
	}
	return area_size;
}
// Sets the offset of the box, relative usually to the owning element.
void Box::SetOffset(const Vector2f& _offset)
{
	offset = _offset;
}
// Sets the size of the content area.
void Box::SetContent(const Vector2f& _content)
{
	content = _content;
}
// Sets the size of one of the segments of one of the box's outer areas.
void Box::SetEdge(Area area, Edge edge, float size)
{
	area_edges[area][edge] = size;
}
// Returns the size of one of the area segments.
float Box::GetEdge(Area area, Edge edge) const
{
	return area_edges[area][edge];
}
// Returns the cumulative size of one edge up to one of the box's areas.
float Box::GetCumulativeEdge(Area area, Edge edge) const
{
	float size = 0;
	// Clamp at PADDING (index 2): the content area itself has no edge to add.
	int max_area = Math::Min((int) area, 2);
	for (int i = 0; i <= max_area; i++)
		size += area_edges[i][edge];
	return size;
}
// Compares the size of the content area and the other area edges.
bool Box::operator==(const Box& rhs) const
{
	// Note: offset is deliberately excluded from equality; only content and edges compare.
	return content == rhs.content && memcmp(area_edges, rhs.area_edges, sizeof(area_edges)) == 0;
}
// Compares the size of the content area and the other area edges.
bool Box::operator!=(const Box& rhs) const
{
	return !(*this == rhs);
}
}
}
| {
"pile_set_name": "Github"
} |
rewrite style FunnyStyles/booboo mixed_rule-applied
rewrite all FunnyStyles/*
funnyStyles
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html lang="en" xml:lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Testcase, bug 379461</title>
<style type="text/css">
body { background: lime; color: black; }
</style>
</head>
<body>
<p>The entire canvas should have a lime background.</p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
description: Gives developers the ability to push apps to a dedicated set of compute
instances.
form_types:
- description: |
Configure security and routing services for your isolation segment.
label: Networking
name: networking
property_inputs:
- description: |
If you are deploying onto an infrastructure that does not support
automated assignment of routers to load balancers (vSphere), or you are
intending to manually configure your load balancer with the IP
addresses of the routers, you should enter static IP addresses here
that will be used by the routers. On infrastructures where automated
load balancer assignment is supported (AWS, GCP, Azure), this field can
be left blank and the "Load Balancers" field on the "Resource Config"
tab can be filled out with the correct load balancer assignment.
label: Router IPs
reference: .isolated_router.static_ips
- description: |
If you are using HAProxy, enter its IP address(es). The addresses must
be within your subnet CIDR block. Point your DNS to this IP address
unless you are using your own load balancer.
label: HAProxy IPs
reference: .isolated_ha_proxy.static_ips
- label: Certificates and Private Keys for HAProxy and Router
property_inputs:
- description: A human-readable name describing the use of this certificate.
label: Name
reference: name
- description: |
This certificate is used to terminate SSL traffic at either the
HAProxy or Router. The certificate can have multiple domains
attributed to it, for example if you have separate system and
application domains.
label: Certificate and Private Key for HAProxy and Router
reference: certificate
reference: .properties.networking_poe_ssl_certs
- description: |
In addition to well-known, public CAs, and those trusted via the BOSH
trusted certificates collection, these certificates can be used to
validate the certificates from incoming client requests. All CA
certificates should be appended together into a single collection of
PEM-encoded entries.
label: Certificate Authorities Trusted by Router and HAProxy
reference: .properties.routing_custom_ca_certificates
- label: Minimum version of TLS supported by HAProxy and Router
reference: .properties.routing_minimum_tls_version
selector_property_inputs:
- label: TLSv1.0
reference: .properties.routing_minimum_tls_version.tls_v1_0
- label: TLSv1.1
reference: .properties.routing_minimum_tls_version.tls_v1_1
- label: TLSv1.2
reference: .properties.routing_minimum_tls_version.tls_v1_2
- description: |
To comply with GDPR, select one of the options to disable logging
of client IPs. If the source IP exposed by your load balancer is
its own, choose to disable logging of XFF header only. If the
source IP exposed by your load balancer is that of the downstream
client, choose to disable logging of the source IP also.
label: Logging of Client IPs in CF Router
reference: .properties.routing_log_client_ips
selector_property_inputs:
- label: Log client IPs
reference: .properties.routing_log_client_ips.log_client_ips
- label: Disable logging of X-Forwarded-For header only
reference: .properties.routing_log_client_ips.disable_x_forwarded_for
- label: Disable logging of both source IP and X-Forwarded-For header
reference: .properties.routing_log_client_ips.disable_all_log_client_ips
- label: |
Configure support for the X-Forwarded-Client-Cert header. This header
can be used by applications to verify the requester via mutual TLS.
The option you should select depends upon where you will be terminating
the TLS connection for the first time.
reference: .properties.routing_tls_termination
selector_property_inputs:
- label: |
TLS terminated for the first time at infrastructure load balancer
reference: .properties.routing_tls_termination.load_balancer
- label: |
TLS terminated for the first time at HAProxy
reference: .properties.routing_tls_termination.ha_proxy
- label: |
TLS terminated for the first time at the Router
reference: .properties.routing_tls_termination.router
- label: HAProxy behavior for Client Certificate Validation
reference: .properties.haproxy_client_cert_validation
selector_property_inputs:
- label: |
HAProxy does not request client certificates.
reference: .properties.haproxy_client_cert_validation.none
- label: |
HAProxy requests but does not require client certificates.
This option is necessary if you want to enable mTLS for applications
and TLS is terminated for the first time at HAProxy
reference: .properties.haproxy_client_cert_validation.request
- label: Router behavior for Client Certificate Validation
reference: .properties.router_client_cert_validation
selector_property_inputs:
- label: |
Router does not request client certificates. When used in combination
with "TLS terminated for the first time at the Router", a XFCC header
will not be forwarded to applications.
reference: .properties.router_client_cert_validation.none
- label: |
Router requests but does not require client certificates.
reference: .properties.router_client_cert_validation.request
- label: |
Router requires client certificates.
reference: .properties.router_client_cert_validation.require
- description: |
An ordered, colon-delimited list of Golang supported TLS cipher suites
in OpenSSL format. The recommended setting is
"ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384".
Operators should verify that these are supported by any clients or
downstream components that will initiate TLS handshakes with the Router.
label: TLS Cipher Suites for Router
reference: .properties.gorouter_ssl_ciphers
- description: |
An ordered, colon-delimited list of TLS cipher suites in OpenSSL format.
The recommended setting is
"DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384".
Operators should verify that these are supported by any clients or downstream
components that will initiate TLS handshakes with the HAProxy.
label: TLS Cipher Suites for HAProxy
reference: .properties.haproxy_ssl_ciphers
- label: |
HAProxy forwards requests to Router over TLS. When enabled, HAProxy
will forward all requests to the Router over TLS. HAProxy will use
the CA provided to verify the certificates provided by the Router.
reference: .properties.haproxy_forward_tls
selector_property_inputs:
- label: Enable
property_inputs:
- description: |
You need to provide a certificate authority for the certificate
and key provided in the "Certificate and Private Key for HAProxy
and Router" field. HAProxy will verify those certificates using
this CA when establishing a connection. If you generated that
certificate and key using the "Generate RSA Certificate" feature,
then your CA is the Ops Manager CA, and can be found by visiting
the "/api/v0/certificate_authorities" API endpoint.
label: Certificate Authority for HAProxy Backend
reference: .properties.haproxy_forward_tls.enable.backend_ca
reference: .properties.haproxy_forward_tls.enable
- label: Disable
reference: .properties.haproxy_forward_tls.disable
- label: HAProxy support for HSTS
reference: .properties.haproxy_hsts_support
selector_property_inputs:
- label: Enable
property_inputs:
- label: Max Age in Seconds
reference: .properties.haproxy_hsts_support.enable.max_age
- label: Include Subdomains
reference: .properties.haproxy_hsts_support.enable.include_subdomains
- label: Enable Preload
reference: .properties.haproxy_hsts_support.enable.enable_preload
reference: .properties.haproxy_hsts_support.enable
- label: Disable
reference: .properties.haproxy_hsts_support.disable
- description: |
You can disable SSL verification if you are using your own self-signed
certificates that are not from a trusted CA.
label: Disable SSL certificate verification for this environment
reference: .properties.skip_cert_verify
- description: When checked, HAProxy and Gorouter will not listen on port 80.
label: Disable HTTP on HAProxy and Gorouter
reference: .properties.routing_disable_http
- description: |
If checked, this turns on the secure flag for cookies generated by
router.
label: Disable insecure cookies on the Router
reference: .isolated_router.disable_insecure_cookies
- description: |
If checked, the router will include Zipkin tracing headers on all
incoming requests.
label: Enable Zipkin tracing headers on the router
reference: .isolated_router.enable_zipkin
- description: |
When enabled, the Router will write access logs to the local disk.
Under some circumstances, it is advisable to disable this feature
to prevent Router from filling the local disk entirely with the
contents of access logs.
label: Enable Router to write access logs locally
reference: .isolated_router.enable_write_access_logs
- description: |
When enabled, Gorouter will parse each request for the presence
of PROXY metadata. This comes with a performance penalty.
If a client IP is sent this way, as when a load balancer
terminates TLS but doesn't support HTTP, Gorouter will set
the X-Forwarded-For header to this value.
label: Enable support for PROXY protocol in CF Router
reference: .properties.router_enable_proxy
- description: |
Maximum concurrent TCP connections per backend for each instance of
the Router. A value of 0 means unlimited.
label: Max Connections Per Backend
reference: .properties.router_backend_max_conn
- label: Enable Keepalive Connections for Router
reference: .properties.router_keepalive_connections
selector_property_inputs:
- label: Enable
reference: .properties.router_keepalive_connections.enable
- label: Disable
reference: .properties.router_keepalive_connections.disable
- description: |
Timeout for connections from Router (and HAProxy, if you use it) to
applications and system components. Increase this to accommodate larger
uploads over connections with high latency.
label: Router Timeout to Backends (in seconds)
reference: .isolated_router.request_timeout_in_seconds
- description: |
Specifies the amount of time, in seconds, that the Router will continue
to accept connections before shutting down. During this period the
healthcheck will report unhealthy to cause load balancers to fail over
to other Routers. This value should be greater than or equal to the max
time it could take your load balancer to consider a Router instance
unhealthy, given contiguous failed healthchecks.
label: Load Balancer Unhealthy Threshold
reference: .isolated_router.drain_wait
- description: |
Specifies the amount of time, in seconds, to wait until declaring the
router instance started. This allows an external load balancer time to
register the instance as healthy.
label: Load Balancer Healthy Threshold
reference: .isolated_router.lb_healthy_threshold
- description: |
A comma-separated list of HTTP headers that will annotate access log
events on the GoRouter.
label: HTTP Headers to Log
reference: .isolated_router.extra_headers_to_log
- description: |
Buffer size (in bytes) to use for requests, any requests larger
than this (large cookies or query strings) will result in a
gateway error. The default value is 16384.
label: HAProxy Request Max Buffer Size
reference: .properties.haproxy_max_buffer_size
- description: |
A comma-separated list of domains to protect from requests from
unknown sources. Use this property in conjunction with
"Trusted CIDRs" to protect these domains from requests from
unknown sources.
label: HAProxy Protected Domains
reference: .isolated_ha_proxy.internal_only_domains
- description: |
A space-separated list of CIDRs allowed to make requests
to the domains listed in the "Protected Domains" field. For example,
specifying 10.0.1.0/24 would allow any requests originating at a host IP
in that range to reach applications or services hosted on the
"Protected Domains" list.
label: HAProxy Trusted CIDRs
reference: .isolated_ha_proxy.trusted_domain_cidrs
- description: |
DNS search domains to be used in containers. A comma-separated list can
be specified.
label: DNS Search Domains
reference: .properties.cf_networking_search_domains
- label: |
Router Sharding Mode: When "Isolation Segment Only" is selected, the
routers of this tile will only have knowledge of applications deployed to
the Cells of this tile; all other requests will receive a 404 response.
When "No Isolation Segment" is selected, the routers of this tile will
reject requests for any isolation segment. Choose "No Isolation Segments"
to add a group of routers for the Pivotal Application Service tile, as when a private
point of entry for the system domain is desired."
reference: .properties.routing_table_sharding_mode
selector_property_inputs:
- label: Isolation Segment Only
reference: .properties.routing_table_sharding_mode.isolation_segment_only
- label: No Isolation Segment
reference: .properties.routing_table_sharding_mode.no_isolation_segment
- description: |
Enable custom configuration that supports your applications at a container
level.
label: Application Containers
name: application_containers
property_inputs:
- description: |
If you use private Docker image registries that are secured with
self-signed certificates, enter them here as a comma-delimited list. List
each registry as either an IP:Port tuple or a Hostname:Port tuple.
label: Private Docker Insecure Registry Whitelist
placeholder: |
10.10.10.10:8888,example.com:8888
reference: .isolated_diego_cell.insecure_docker_registry_list
- description: |
This field is used as the placement tag for your Diego Cells. It should
be unique. Use this name to configure an Isolation Segment with the Cloud
Controller.
label: Segment Name
reference: .isolated_diego_cell.placement_tag
- label: Docker Images Disk-Cleanup Scheduling on Cell VMs
reference: .properties.garden_disk_cleanup
selector_property_inputs:
- label: Never clean up Cell disk-space
reference: .properties.garden_disk_cleanup.never
- label: Routinely clean up Cell disk-space
reference: .properties.garden_disk_cleanup.routine
- label: Clean up disk-space once threshold is reached
property_inputs:
- description: |
Disk cleanup will initiate whenever a Cell has exceeded this much
disk-space for filesystem layers.
label: Threshold of Disk-Used (MB)
reference: .properties.garden_disk_cleanup.threshold.cleanup_threshold_in_mb
reference: .properties.garden_disk_cleanup.threshold
- label: |
Enabling NFSv3 volume services will allow application developers to bind
existing NFS volumes to their applications for shared file access.
reference: .properties.nfs_volume_driver
selector_property_inputs:
- label: Enable
property_inputs:
- description: |
Service Account User name (required for LDAP integration only)
label: LDAP Service Account User
reference: .properties.nfs_volume_driver.enable.ldap_service_account_user
- description: |
Service Account Password (required for LDAP integration only)
label: LDAP Service Account Password
reference: .properties.nfs_volume_driver.enable.ldap_service_account_password
- description: |
Server hostname or IP address (required for LDAP integration only)
label: LDAP Server Host
reference: .properties.nfs_volume_driver.enable.ldap_server_host
- description: |
Server port (required for LDAP integration only). Defaults to 389.
label: LDAP Server Port
reference: .properties.nfs_volume_driver.enable.ldap_server_port
- description: |
FQDN for user records searched during user uid resolution
(required for LDAP integration only). For example, the FQDN
value can be specified as "cn=Users,dc=corp,dc=test,dc=com".
label: LDAP User Fully-Qualified Domain Name
placeholder: cn=Users,dc=corp,dc=test,dc=com
reference: .properties.nfs_volume_driver.enable.ldap_user_fqdn
reference: .properties.nfs_volume_driver.enable
- label: Disable
reference: .properties.nfs_volume_driver.disable
- description: |
Enabling the GrootFS container image plugin is recommended.
However, if you experience issues with GrootFS, you can disable the plugin
and rollback to the image plugin that is built into Garden RunC.
It is recommended to recreate all VMs in the BOSH Director config
when toggling this option on or off.
label: Enable the GrootFS container image plugin for Garden RunC
reference: .properties.enable_grootfs
- description: |
Improves resiliency and consistency for application routes.
label: Router uses TLS to verify application identity
reference: .properties.rep_proxy_enabled
- description: |
Optionally configure rsyslog to forward platform component logs
to an external service.
If you do not fill these fields,
platform logs will not be forwarded
but will remain available on the component VMs
and for download via Ops Manager.
label: System Logging
name: system_logging
property_inputs:
- label: |
Do you want to configure syslog for system components?
reference: .properties.system_logging
selector_property_inputs:
- label: "No"
reference: .properties.system_logging.disabled
- label: "Yes"
property_inputs:
- description: |
The aggregator must be reachable from the Isolation Segment network,
accept TCP, UDP or RELP connections, and use the RELP protocol (e.g.
rsyslogd). You can also configure this with an IP address.
label: Address
reference: .properties.system_logging.enabled.host
- description: |
The typical syslogd port is 514. Ensure syslogd is listening on
external interfaces.
label: Port
reference: .properties.system_logging.enabled.port
- description: |
Select the transport protocol for forwarding logs.
label: Transport Protocol
reference: .properties.system_logging.enabled.protocol
- description: |
When checked both Permitted Peer and TLS CA Certificate are required.
label: Enable TLS
reference: .properties.system_logging.enabled.tls_enabled
- description: |
Either the accepted fingerprint (SHA1) or name of remote peer, e.g.
*.example.com
label: Permitted Peer
reference: .properties.system_logging.enabled.tls_permitted_peer
- description: |
This certificate will ensure that logs get securely transported to
the syslog destination
label: TLS CA Certificate
reference: .properties.system_logging.enabled.tls_ca_cert
- description: |
Workaround to avoid truncation of very long
(> 1024 bytes)
log lines.
May negatively impact performance.
label: Use TCP for file forwarding local transport
reference: .properties.system_logging.enabled.use_tcp_for_file_forwarding_local_transport
- description: |
Accepts configuration for rsyslog
in the rainerscript syntax.
For example,
'if ($app-name startswith "exampleComponent") then stop'
can be used to drop all traffic
from a particular component.
Entered configuration will be applied
prior to the forwarding rule.
label: Custom rsyslog Configuration
reference: .properties.system_logging.enabled.syslog_rule
reference: .properties.system_logging.enabled
- description: |
These are several features that may require a greater understanding of
Cloud Foundry system components and their more recently introduced
capabilities.
label: Advanced Features
name: advanced_features
property_inputs:
- label: Cell Memory Capacity (MB)
reference: .isolated_diego_cell.executor_memory_capacity
- label: Cell Disk Capacity (MB)
reference: .isolated_diego_cell.executor_disk_capacity
icon_image: iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAAAXNSR0IArs4c6QAAGtVJREFUeAHlXQd4FNe1/oV6l0ASKkgIhCRUkOgtFBuDAReKU+wkTnEcE8dxITE2pts0+4GxE1yfE7f3nNix/cVgCJhuSigCARKoAZKQkISQUO+VnLMwWFppd2d3quTzffvN7uzMnXvP/eeWc/9zrt0NEnxPpLKpDXuvNuLk9SZcqm5FVUu7oeTejn0wxMsBY/ycMT3IBT7O9t8TjQAO34eSZle3YF1qFT7NrUNjm3m8u9jb4aeD3LEswRsRXo69Xj12vbkFKKpvxZqUKrx/oQYt5uu9S0U72gGPRnliRaI3gt1673vSKwFQ1tiGV85V4a3MGjRYeOO71LzRCVdqEf4w1BMvDPNGP5fe1zX0KgDUUJ/+elo1NqVVodraV96o4o1/elGT8GycN/4Y5wVPGjP0FukVAGhsbcfbWTV4mfr56003B3ZKVZCfcx8sofHBE9GecHHo+UDo0QBobb+BDy/WYnVKJQrq25Sq827THeBmj5WJPngk0gMOfWjA0EOlRwKAZ66f0Yh+5ZlKXKpp1VT1QzwdsHqEDx6imYOdXc8DQo8DwPYr9Vh2ugKpFS2aVrzxwxN8HbFupC/uC3Uz/kvXv3sMAL692oClpytxrLRJ1wqd4O+M9SN9cEeQq67zKWRO9wA4RVa7pckV2EMWvJ4kM8iiuH6UL0aTdVHPolsApFc2YwW98f/Kr9ez/izm7YEwN6yhFiHWx8nitVpcoDsA5Na04MWzlfgkuw7KTujUUzdPFh+OcMdLw30Q7qkv87JuAFBMZtu1NI9/j+bzMttwTNe03a1u5YaL6Wtk/IfNywvIfrCc7AiBOjEvaw6AClqh+x8y276RUYN6iWZb0XVl1ww45wNOBTdvaR4ANIUBN9Rppt3IvPxUjCcWk3nZV+OVR80AUEdm2z+nV2Pj+SpalrVypUZ0TRtfSDYD5ys3P3ZGhqMbZOdvCr35UWmR1JuahOfivbEw1gvuGpmXVQdAE73l71Izvz61EiWNavXyVNlOhVTxeUAfC4ajdlr5axoINIcQetRZ/Alw6YOlCT54nLoHZ2od1BTVANBGZtuPs2vxEg3w8uuM3j7FSkwAc7pKFX+ZKp6afWuknbqDpnD6BAF26tj8w9ztsYoGir+K8IC9SuZlxQHAZtsvLteT2bYCWcTCUUO4uqZTve2uOgI4SrQYtjljpvck7ClsV21WEk3spNUjfPHjcDfFzcuKQntnQT1GbbuKBw+Wqlb582jefW5eMNrKdwHlNNCTKvZNaHVNN6TJaash/KKwzlh3rEMlRREAHL7WiMk7ruKevSU4U25l02tjaZnLl3RfEL6aFoD04kzsy7sEVFZQEy7dgriv9CrS64oMafMz+FlqCOuOdci6ZJ0qIbJ2AWfKmmihphI7CxuUyGu3aRrb3htaWjD0rxuQX11583pXemvDwru915qTYa7uyJzxAFztb9LDtFibmB3iSgtOPhjRTz7zsiwAyKpqwQrq47+kvl6tCR2vvq2hfnKOUbO84tA3WHt0X+e6DaIRvZd353M2/FoenYg1sSM73fk1maq57GqtTvIc4Uc0NuCyR3tLtypKAkB+bathVM+je7VsOLz+/hKtvzNz13j9PbeyHLF/3UjMX6PBpgO9tYOG0ExAWo/n0sceadPnY7C7ZycQ8ECXGcerVOQn8GyRZws8awjzuNkqdcqUyB82AaCkoY1o1pX4X5rPK8zAul0MZuCsIAbOb8wwcOZ8+QG2Xcq4fU+nL337Af79O52y5cecwFBsnTC921uZofQBMZTWqMhQIoYafkf2g2VkRwhwtd5uYRUA2LHiVSJdsgWvrlWdxl4sB++bnEzM/vz9bivm9kluBZykm3t3TpyBWf0H3E7W+IuaHEXh2e4OdgaL4iIirVrj2CIKAPVEutxMlb7hfDUqmtWx3jEL90/E
wv2TCBZuMzX5w/62CRcqrgv66P7o7gEMIJu/RIny8MK5u+bBiboEc8Is5dfohXlNAZayqef6OvXB8/FeeJrMy24iSKtmAdBMHftfyamCV+mKqdlXQ2zh4b9ybD+WHNwpLnshoYBH5z5c3I2dr3olbhQWRyV0Pmnil5x+CiYe0eV0IHUHvOr4GDm3OJkxL5sEQHJhDX6RVIWMKqMBVZdHyXOCl0pt8cQpqqlC9HsbUNsi0t7gSCPn8AjJA0IPmg7ytDCEpodiRYqnkthnGF8X4+2Aj6d6Ykzf7mdBJofFydmlKD1+DlM82qHkQhVn4OHBNMd+IATvTOhntRvWov3bxVc+a4fsBCgvM9aT1b9rqdt57vxJq+5jFzMuI5eVy2xS+Val2v3FXGdTg1tR5ngCJypoIcyEmM3D9co6HNpzGv4XL2CSF700Mi9UzQ11RcrcYPz/FH8MtoEpc/hKDj7NOGuiaGZOl9NYgYEgUT4tyMWh68VWp8Jl5TJz2VkHcgrX0aTAdvT3S8HBusMoaak2m7xZAAh3FpVU48iuUxiYn4MJXnaQioO7yJR64t4gbLmrP+J9bRuVt7W346k9W4QsWndkj/gS6yuuu4c8lXKcbCC2DYy57KwD1gXrRIpwnUykWW54QBqONBxEQVO5qOREAUBIKbegHMd2nUTUtXyM9rLqVkMS44kyvW9mf+ydGYix9F2KvHPmGFJKaKnXVqmtAerrbL379n2p1RV4h6agUoR1wTph3bCOrJUx/naIDryAo40HkNNYYtXt1tciJZ+VU4JTu5IwrKIICSKAMMzHEVtpkeYYIX2aDHz561RxKw/Tap9EcSi9RpQPqe0ZsDLjDPkkSl+sYd2wjlhXrDNLktjXDgnBuTjZvB+ZDab7eXPp2AQAIcFzmUVIJSCMqivFUM+uc+IIMtt+MtkPZ6mvM7bZC2nYclxycAcqGhtsubXTPQsTxuHpiJhO52z5UUEzkCVpybbc2u09rCvWGeuOdWgsMT59MCq4AClt+5Fad9n4b6t+d03dqttvXpycmgc7+owbFYFS335oumFnMNs+asZsa8NjDLckFxfggxTrRt/dPSuQ7PkrfjAdN2h94O80mCxplvYGf5B3Ab8bFI3Rvn7dPc7qc33Iz/DnZOt/kNY83r9lXna2vwF/r6tIqknHDem9lyFPklqAjqViw/CJ5Gxc3peE5NkBBvu03F6zvOjy5O6viJkj3Qy98c574eXsAm9HJ2yIH92xKDZ952HgkynHwHmUU1iHbOs/db8/cu324UQtVb70Xut2FmUDgJAirYeAzNKKyEfnTuF4Ub7ktCcED8TP475b1v1l2BCMk+HNPUGm6I/yiYiigPC8Xs6KF7IoOwCEhOU+VtMga8m3OyQn24cGfW/ePa/TUjIvK7+ZOEEWw8wL508RzV2kVVJyaaQn0GMAsOrwblyrr5Vc4gXDx2FkYNeVPO67fxseJTl9HkusollBT5EeAYD069fwZvJ/JOu0r4sr1k6dZTKddbGj4ENjAqnyVk4G0sg+0BOkRwDgqT1fodVGa1vHSlg3dTb6mVm88aNB4VojylfH+8V+b6WBIFsIe4LoHgBfZKZgf162ZF0ODwjGY4njLKbzOE3lErx8LV5n6YIDtEbwOa0V6F10DYB6Gkzxap8c8saMeeRtY7m49uQF9EbieDkeiUW0Wljfqs5yuq0ZtqwRW1OW4b71R/d/R++WkN4vaMo3KXSQ6BSm+AXiZwMGi77e1IVXGuqwLivF1N+6OK9bAORUluHVpG8lK8mDBnWv3HGP1emwcYhJH1Jl06XzyK41vyQr9RlS7tctAJ7ZsxVNbdJpaKsmzUCwZ/dsGHOKY6bPiqHDzV0i6r8mWrZ+JvWEqGu1uEiXANiZnYnt2RmS9RHd1x/PjJ5sczoLh8Qi0p2YMBLl39cK8O/iKxJTUeZ23QGAGb7P7N0qS2n/Mn0uHO27rlKKTZxZv3+hFUM5ZCG1AnK0aHLkpWMaugPA
pqRDuGiJ3t2xBCa+z4+Kx8zB0Sb+FX96NlkN5wZJp5JfqqsBjwf0JroCQCExfNcd3StZRy40eNs07X7J6QgJvDZsLJxFTCGF600d12elooBmBnoSXQFg0f5tqJOBrLl4/J0Y5NNXNj2zL+DiqGGS06uj7m3ROelcBskZ6ZCAbgBwiAinn2VInzMP9PIBA0BuWRyZAHYRlyr/LMzFtxRvQC+iCwBIYvgaafL1u+bAlZ0/ZBY38jDmrkAO4XWCVpoe6kF0AYC3Tx9FqgxvxfTwSMyPlt5Um6qYH4aEY4Q7eRlLlPM1leAVQz2I5gAopTV+ORi+jjRI20zTPqXkRG4Zpr9+CGe+aYAc0aKYM1DSRGlpLJoDgFk+lTJQqtngE+PXX3Z1niuswvx3jmL8KwewL5M493VkV8iT3sVUtbbIyiS2teDSjd22PpnuO3X1Cj5MPSUhhZu3CgxfyQl1SOBSSS1e3JaGT09eAfMcO8klct4gvzs4G//R6SqLPz7Mu4jfhUdjLFkstRLNWgA5Gb4bbjF85VBiQUU9Hv8kGTGrduHvSd1UPj+kjVivWdZ78Bjnj+HzJA0I5WYSGz/H3G/NWoAPU0/iBLUAUoUZvg93YPjaml5pTRNe+SYTb3+bDY7wYVGKSHVh9P74iLjWTGInK6/jA2oJHpWBj2jmMSb/0gQAVeTVw949UqU7hq+1aVY1tOC1PRfw2t4LqKUQOOKFWoF0cuicQIEc6asUWZJ2Cj8kIPs4SW9VrM2HJgBYdWQ3SmRwzDTF8BWjhPrmVrx1INvw1pfX2+gqXk0Dwis0IAyz8f5bGS1tbjL4F26WiYkkpvzCNaoDIK20GG8lHxWeb/PREsPXVMLN1Lz/7Ugu1u7IwNUqae5ghmdcJBZxEAFA4sTgbfIwfoy6gWHe8pmwTemg43nVB4Hs0y8Hw3ftlFlmGb4dC8nfOVr5/x3LQ/TKb/CHT8/IU/mccAup8IL0pruN3N2eSlWfSaxqC/A52foP5MvD8F0wXBxxk0fYX50pxPKtacgoppgASgh3A6HUCnhJGxAeJP+Hzwpy8JAMfESxxVQNADcZvtvE5svsdWIZvrvSirFsy3kk59+KG2w2VSl/0igwg1qBcdIte7xaeD8Fo3R3kNiniCyOal3AOorfe4XW+6UKT/ksMXyPXLqOKRu/xazNR1So/FslqqB3iaeGEqWwsR5rVWQSS8+xiAJnE8NnU9JBEVeav4QZvmz0MSVn8isMTf2O88WmLlH2PBuH+pOF0HYWmiF/r11Mw28GRiLSw3oyq7UFVKUFWLj3a1n4cMzwDaIoncaSWVyNn7x3HKPW7YNWle9Bu38tmxaLl2JGGGfP6t/N5AanFpNY8RZgB7F7lWL45pXV4aXt6YbRPQU11URcKBzr41MHY+nsGPh7OqO5vQ3/KMpGlkRfgJ3XCrHtaj7ul4GPaE4xigLAwPAlfr8c0pHhW0zz93U0j3/vcA44nK0WwpE7fj1xIF68Lw4hvt/F+hOYxLOO7pGcLWYS3x0QQjuJSexTzOREUQAww/cSefhIlXmRcQaGb0VdMzbszsLm/ZdQ32yN2VZqDr67nwMxPjg6FGvmxiHCn4JPdyMzKZL4PHpzt9AbLEVyiCux8eI5LJfBQcVUPhQDQAFt2SIXw3fNpHsMb/xGqvyqBu2cLeckBGHdvHjEh1genDF97BtqxhupS5AiL19IBYewcZfBTa27fCgGAPbqlYPhOzUgAdM2HEdprXZhV6ZF++Pl+cMwdpB4M+0gYhI/T74Jq8m9XYrUk3vcs+eS8O7wiVKSMXmvIgA4cuUy/imx4Jxj+3Zn7DpG/d8NbSp/wuC+hjf+zugAkwo098cLFE7+Y9q9LE+iL8CXRXn4yYBB5h5l83+KAECOpV4uUVtpGFW+KjPVTgpMHOCN1XPiMCcxuNN5a3/wDmPcFfww6YC1t3a5/lmF/AkUAUBGGXHnpA5cG2i+Xy++ye2iMRtORAV44MX7Y/HQmNBO
UcRsSOr2LQ+EhGO6fxD2SmQ9c6wBJUQRAEjOKAfEKwuXnIzYBEJpGrfi3hg8MjEcDvbytzibE8Yjcf8WtMgcRFJs+cxdp08AVBO7t+W7ubW5Akj5L4AMN0tmDcXvyZDj7Ci1yTKdkxjyVno6IpacQ9NMX6TRP/oDQCutglWEKKoOH1dHLLo7CgvvioS7szoqWElzeY5JXKwDX4COylWn9B2faOl7RSgN/JTJlruTPZ6aNgSLZ0bDx016PEBLRen4v9etmMS/TD7c8bTm35XRtK3FaiTLWq2frXebvM+Z7PULJg/Csnti0N+LiJwaycOhEXg3NwtHy63b1EHJ7OoHAGzSNwz8aAAokxi2V50QjlU0sg/r6yZTqrYnwzGJ36BYhWMObJPDu8z2jHS4Uz8AqCFjS7N092suG0Pox6MGGOz1Uf09OxRX+68jffywgIJRckugB9EHANpoBF4xQBZ93DcsCGtpoSYx1EeW9JRIZG3MSEMU0XIdRBXXBwB44NcujQN3R5Q/1tNCzYQI6e7bSlR6xzT7GWISj8ITtMGE1qI9AJqob+bm30YZG+5Lb3w8ZsTK7xlsY5ZE3cbby7x3OQtnq8Rt7yYqURsuUgYArTTFshe5bGvjwC8+2Mtgr58/QlmbgQ06FXUL7wn0JnkCTTq0Q9T1Sl2kDACK4gHPUnKcLKT9Y8ys5NVSc91k3SAtwt+dWDix+NnYMPRhdkYPlh/064+HQwfjEzIQaSXKAIDH4dys19Cc3usaAaGoa4vQTjb3clrtEykhPi5kr4/Foz9Qxl4vMhuyX7Yhfgy2EnOoRqOo4goBQNATVXJ10E0weBNV2/sqbUB8iyFTSU13m2VrnJ+HE14ge/0f7oiAi4L2eiHHah+DXNzAZuLnaK8hLURhANwq0g2a5nGF8yKPN7UGLuSpUxVotrxeLg54dkYU/jg9Ep4u0mYIZh+kgz+fiYjD+5cvIrNWuuOMtcUxCQAao8gv7fS4Cm72ec7f/bKrG9nrn6S3fTG99X3dLbcQ8mdS3RRbKFzc3y5fQKWCNgG2QJoSkwBQVvldK9+J7La/nTQIy2ldPshb+aVgUwpR63w7cQM+uZKNFylaWK4Mu6GZy3dfR/JYMiEmATB6oK+JW+Q/7e3qYDDi/H5qhGxMHPlzKV+K/yq8jBUZp5Eug6+kmFyNpu18TYkduU+b9KwY9/I+JF2uMHWv7OdHhvlgHRl1ZsWbHx/I/mCVEtxNNPFl6ck4JYOvhNgsj6CAE6enmY6faBYAW84WUow89c2Vk4f4Yf38eEyiY2+Q/5RdM1Q8+/+rLV+MvRM/Il6iKTELAL5p7lv/wdepNH3TQGbHBWLtvDiMDFOvO5KzmGfpTV+efhq8Y4gWci95KG2fOMPsoy0CgKNosa99KkXM1EJ4/PojWtpdPScWQwO7egZrkSdLz8yivn0l9fFfUF9vsn+1lIjE/+M9fXBoyj3wtRB5zCIAOB+V9c145ONT2HKW5vAaCZM7fjGenDGJ3DGwnzy8AbmLkk+j+ZcyzxqcQTjmj1bCEUY+HjXZYuVz/kQBQCjI7vRiLP1KjZArwhO7Hnm6uGDyYAO9K9BbO3pXx5xdo7iH6ymqB5M82LdfK+EBH+9/zNvciBWrACAk+mVyAVZ+rWDQJeFBZo5sMHqaCJ7P3x0NX40MRhUU32/jxfPYnJ0O3g1EK4mmoBkcmOInIYOsnkbbBAAuaDuHXTueh9UUoCG3rF6rsoNtCM8RCJ6ZFgkPMh+rIXUU6fsvVOlc+Upa8CyVZeCtvQ1/PXAIeMtbW8RmAAgPa2lrx3uHcrBuZ6Z8sfeExK04spPH0tlD8fgU5Zw8eNu3d3MzqblPRUlzoxW5k/fS/sQoWhqdCN7omgNSSBHJABAe3kABGzbvv4gNu7Jgc+hVITEJR3bzWkl8gV9PGCibm1cb9esfkZcvD/CU8tETU2Rf8i1YFBmPhbR4xFvYyCGyAUDIDE8bX6VADn/ed9HK4MtCCvIc
I8nRkz18Hxw9wOp+UcgBG0k/p02eVpK9/oLEmD9CmrYcOTgEu5Y9T5Uvd0Bp2QEgFJDDr6/fmYF3D+aIC78u3CjzkV29mSV8X0KwVSlvp1D2y8lsm1KtnincOIO8V+EC2lBi+dBEBDgrs0CmGACEwvAGDBzJ66OjebRTlnZzYw72wKzhOywEe+At3ZZSxR8rJ0qbRmJPjKpf0cBuFRFFwtw8FM2F4gAQcs9bsKzYeh7/PFWgoYkEmBETYIj6MSa8c+yBkxWlWJZ2GntKtTN2sdXzx2S3X0N+A1E27Hgu6Nqao2oAEDKVWlBJ8XvTsP2cNusLQj7mDQ82dA3waDfY66VG9BLStfV4D9nt18eNQqLK4eJVB4CgoOM5ZVhKgZwPZGnX1BpIxTNrNPXTm0o7na0n691EYghrIZoBQCjs3gxaKiUgqMk7EJ5tOM6q6fRTrR+jffoZzLZ399fWr0FzAAgKZ+7BCorpf76oWjilzlFlAMRS3859PMcO0oPoBgCsDDYv/yMpHy/SrCG7VJmgSF2UrhIABtNofhXZ6zlGAHsF6UV0BQBBKWxefp/29Vnz7wwUybGvj5Bwd0eFARDs4oplZLZ9jObzvL2t3kSXABCU1NjShjcPXKKdvbJQRnGCFRGFANCPiBiLI4fhyYgYcLxAvYquASAoraaxBZt4b789F1HTJPOyq8wA8CQb/R+HxOHZIfHguEB6lx4BAEGJZbVNePnW7p4NLTIRL2QCgAutyj0xeCiWUHhYP1qt6ynSowAgKLWossHAQ/jg6GW0SN0vQCIAHGhAx9u7sH9fCK3P9zTpkQAQlJxTWotV29INMweblxlsBAAP535K27sxEyeim21shDzq/dijASAoN62oyrBZlE2kVRsAMJc2g1gbOxLxXj2Tri7ojY+9AgBCgZJyyw1Wxb2ZVsThswIAd1HQZzbbju3rLzyyxx97FQCE2jiQVWIAwrEcEfF3RABgvC8FoKKFmjsJAL1NeiUAhErallJk6BrMOrWYAUACNfFrqKmfo/DOXUJ+tTj2agCwQpnW9dnJK4bB4kXiJHSRbgAwhLZ7WU32+odolw5zvvVd0uqBJ3o9AIQ6aSXz8oc0bVy9PQMFNI28LR0AMOBWuJZHaFrnoEOz7e08y/jlewMAQWdNZF5++2A2XiYau2EjKgKAP5ltl5C9/olBQxXdo0/Ig56O3zsACMqvbWzF6/suABHNBtOth0q7dQvP18vxv5+hgSuV93JpAAAAAElFTkSuQmCC
install_time_verifiers: []
job_types:
- description: HAProxy is the default load balancer for SSL termination. Alternatively,
you can use your own load balancer and forward traffic to the Pivotal CF Elastic
Runtime router IP. Self signed certificates will function properly.
dynamic_ip: 0
instance_definition:
configurable: true
constraints:
min: 0
default: 3
label: Instances
name: instances
type: integer
label: HAProxy
max_in_flight: 1
name: isolated_ha_proxy
property_blueprints:
- configurable: true
name: static_ips
optional: true
type: ip_ranges
- configurable: true
name: internal_only_domains
optional: true
type: string_list
- configurable: true
name: trusted_domain_cidrs
optional: true
type: string
resource_definitions:
- configurable: true
constraints:
min: 1024
default: 1024
label: RAM
name: ram
type: integer
- configurable: true
constraints:
min: 2048
default: 2048
label: Ephemeral Disk
name: ephemeral_disk
type: integer
- configurable: false
default: 0
label: Persistent Disk
name: persistent_disk
type: integer
- configurable: true
constraints:
min: 1
default: 1
label: CPU
name: cpu
type: integer
resource_label: HAProxy
single_az_only: false
static_ip: 1
templates:
- consumes: |
consul_common: {from: consul_common_link, deployment: (( ..cf.deployment_name )) }
consul_client: {from: consul_client_link, deployment: (( ..cf.deployment_name )) }
consul_server: nil
name: consul_agent
release: consul
- manifest: |
ha_proxy:
backend_ca_file: (( .properties.haproxy_forward_tls.enable.backend_ca.value ))
backend_crt: |
(( .properties.haproxy_client_certificate.cert_pem ))
(( .properties.haproxy_client_certificate.private_key_pem ))
backend_port: (( .properties.haproxy_forward_tls.selected_option.parsed_manifest(backend_port) ))
backend_ssl: (( .properties.haproxy_forward_tls.selected_option.parsed_manifest(backend_ssl) ))
buffer_size_bytes: (( .properties.haproxy_max_buffer_size.value ))
client_cert: (( .properties.haproxy_client_cert_validation.selected_option.parsed_manifest(client_cert_validation) ))
client_timeout: (( .isolated_router.request_timeout_in_seconds.value ))
disable_http: (( .properties.routing_disable_http.value ))
disable_tls_10: (( .properties.routing_minimum_tls_version.selected_option.parsed_manifest(disable_tls_10) ))
disable_tls_11: (( .properties.routing_minimum_tls_version.selected_option.parsed_manifest(disable_tls_11) ))
enable_health_check_http: true
hsts_enable: (( .properties.haproxy_hsts_support.selected_option.parsed_manifest(hsts_enable) ))
hsts_include_subdomains: (( .properties.haproxy_hsts_support.selected_option.parsed_manifest(hsts_include_subdomains) ))
hsts_max_age: (( .properties.haproxy_hsts_support.selected_option.parsed_manifest(hsts_max_age) ))
hsts_preload: (( .properties.haproxy_hsts_support.selected_option.parsed_manifest(hsts_preload) ))
internal_only_domains: (( .isolated_ha_proxy.internal_only_domains.parsed_strings ))
server_timeout: (( .isolated_router.request_timeout_in_seconds.value ))
ssl_ciphers: (( .properties.haproxy_ssl_ciphers.value ))
ssl_pem: (( .properties.networking_poe_ssl_certs.parsed_manifest(routing_certificates_and_private_keys) ))
tcp_link_port: 2222
trusted_domain_cidrs: (( .isolated_ha_proxy.trusted_domain_cidrs.value ))
name: haproxy
release: haproxy
- manifest: |
loggregator:
tls:
ca_cert: (( $ops_manager.ca_certificate ))
metron:
cert: (( .properties.metron_tls_cert.cert_pem ))
key: (( .properties.metron_tls_cert.private_key_pem ))
name: metron_agent
release: loggregator
- manifest: |
syslog: (( .properties.system_logging.selected_option.parsed_manifest(syslog) ))
name: syslog_forwarder
release: syslog
- description: Routers route Internet traffic to applications placed in the isolation
segment.
dynamic_ip: 0
instance_definition:
configurable: true
constraints:
min: 0
default: 3
label: Instances
name: instances
type: integer
label: Router
max_in_flight: 1
name: isolated_router
property_blueprints:
- configurable: true
name: static_ips
optional: true
type: ip_ranges
- configurable: true
default: false
name: disable_insecure_cookies
type: boolean
- default:
identity: router_status
name: status_credentials
type: simple_credentials
- configurable: true
default: true
name: enable_zipkin
type: boolean
- configurable: true
default: true
name: enable_write_access_logs
type: boolean
- configurable: true
constraints:
min: 1
default: 900
name: request_timeout_in_seconds
type: integer
- configurable: true
name: extra_headers_to_log
optional: true
type: string_list
- configurable: true
default: 20
name: drain_wait
type: integer
- configurable: true
default: 20
name: lb_healthy_threshold
type: integer
resource_definitions:
- configurable: true
constraints:
min: 1024
default: 1024
label: RAM
name: ram
type: integer
- configurable: true
constraints:
min: 2048
default: 2048
label: Ephemeral Disk
name: ephemeral_disk
type: integer
- configurable: false
default: 0
label: Persistent Disk
name: persistent_disk
type: integer
- configurable: true
constraints:
min: 1
power_of_two: true
default: 1
label: CPU
name: cpu
type: integer
resource_label: Router
serial: true
single_az_only: false
static_ip: 1
templates:
- consumes: |
consul_common: {from: consul_common_link, deployment: (( ..cf.deployment_name )) }
consul_client: {from: consul_client_link, deployment: (( ..cf.deployment_name )) }
consul_server: nil
name: consul_agent
release: consul
- consumes: |
nats: {from: nats, deployment: (( ..cf.deployment_name )) }
manifest: |
request_timeout_in_seconds: (( request_timeout_in_seconds.value ))
router:
backends:
enable_tls: true
max_conns: (( .properties.router_backend_max_conn.value ))
cert_chain: (( .properties.routing_backends_client_cert.cert_pem ))
private_key: (( .properties.routing_backends_client_cert.private_key_pem ))
ca_certs: |
(( .properties.routing_custom_ca_certificates.value ))
(( $ops_manager.ca_certificate ))
((( /cf/diego-instance-identity-root-ca.certificate )))
tracing:
enable_zipkin: (( enable_zipkin.value ))
write_access_logs_locally: (( enable_write_access_logs.value ))
secure_cookies: (( disable_insecure_cookies.value ))
status:
user: router_status
password: (( status_credentials.password ))
route_services_secret: ((( router-route-services-secret )))
client_cert_validation: (( .properties.router_client_cert_validation.selected_option.parsed_manifest(client_cert_validation) ))
enable_proxy: (( .properties.router_enable_proxy.value ))
disable_http: (( .properties.routing_disable_http.value ))
cipher_suites: (( .properties.gorouter_ssl_ciphers.value ))
enable_ssl: true
max_idle_connections: (( .properties.router_keepalive_connections.selected_option.parsed_manifest(max_idle_connections) ))
tls_pem: (( .properties.networking_poe_ssl_certs.parsed_manifest(routing_certificates_and_private_keys) ))
min_tls_version: (( .properties.routing_minimum_tls_version.selected_option.parsed_manifest(min_tls_version) ))
ssl_skip_validation: (( .properties.skip_cert_verify.value ))
drain_wait: (( drain_wait.value ))
load_balancer_healthy_threshold: (( lb_healthy_threshold.value ))
extra_headers_to_log: (( extra_headers_to_log.parsed_strings ))
routing_table_sharding_mode: (( .properties.routing_table_sharding_mode.selected_option.parsed_manifest(sharding_mode) ))
isolation_segments: (( .properties.routing_table_sharding_mode.selected_option.parsed_manifest(isolation_segments) ))
forwarded_client_cert: (( .properties.routing_tls_termination.selected_option.parsed_manifest(gorouter_forwarded_client_cert) ))
disable_log_forwarded_for: (( .properties.routing_log_client_ips.selected_option.parsed_manifest(disable_log_forwarded_for) ))
disable_log_source_ips: (( .properties.routing_log_client_ips.selected_option.parsed_manifest(disable_log_source_ips) ))
routing_api:
enabled: (( .properties.routing_table_sharding_mode.selected_option.parsed_manifest(routing_api_enabled) ))
uaa:
ca_cert: (( $ops_manager.ca_certificate ))
ssl:
port: 8443
clients:
gorouter:
secret: (( ..cf.uaa.gorouter_client_credentials.password ))
name: gorouter
release: routing
- manifest: |
loggregator:
tls:
ca_cert: (( $ops_manager.ca_certificate ))
metron:
cert: (( .properties.metron_tls_cert.cert_pem ))
key: (( .properties.metron_tls_cert.private_key_pem ))
name: metron_agent
release: loggregator
- manifest: |
syslog: (( .properties.system_logging.selected_option.parsed_manifest(syslog) ))
name: syslog_forwarder
release: syslog
- dynamic_ip: 1
instance_definition:
configurable: true
constraints:
min: 0
default: 3
label: Instances
name: instances
type: integer
label: Diego Cell
max_in_flight: 4%
name: isolated_diego_cell
property_blueprints:
- configurable: true
constraints:
min: 1
label: Cell Disk Capacity (in MB)
name: executor_disk_capacity
optional: true
type: integer
- configurable: true
constraints:
min: 1
label: Cell Memory Capacity (in MB)
name: executor_memory_capacity
optional: true
type: integer
- configurable: true
name: insecure_docker_registry_list
optional: true
type: string_list
- configurable: true
name: placement_tag
type: string
- configurable: false
name: silk_daemon_client_cert
type: rsa_cert_credentials
- name: network_policy_agent_cert
type: rsa_cert_credentials
resource_definitions:
- configurable: true
constraints:
min: 2048
default: 16384
label: RAM
name: ram
type: integer
- configurable: true
constraints:
min: 4096
default: 65536
label: Ephemeral Disk
name: ephemeral_disk
type: integer
- configurable: false
default: 0
label: Persistent Disk
name: persistent_disk
type: integer
- configurable: true
constraints:
min: 1
power_of_two: true
default: 2
label: CPU
name: cpu
type: integer
resource_label: Diego Cell
single_az_only: false
static_ip: 0
templates:
- consumes: |
consul_common: {from: consul_common_link, deployment: (( ..cf.deployment_name )) }
consul_client: {from: consul_client_link, deployment: (( ..cf.deployment_name )) }
consul_server: nil
manifest: |
consul:
agent:
node_name_includes_id: true
name: consul_agent
release: consul
- manifest: |
containers:
proxy:
enabled: (( .properties.rep_proxy_enabled.value ))
additional_memory_allocation_mb: 32
trusted_ca_certificates:
- ((( /cf/diego-instance-identity-root-ca.certificate )))
- (( $ops_manager.ca_certificate ))
- (( $ops_manager.trusted_certificates ))
diego:
executor:
disk_capacity_mb: (( executor_disk_capacity.value ))
memory_capacity_mb: (( executor_memory_capacity.value ))
post_setup_hook: sh -c "rm -f /home/vcap/app/.java-buildpack.log /home/vcap/app/**/.java-buildpack.log"
post_setup_user: "root"
instance_identity_ca_cert: ((( /cf/diego-instance-identity-intermediate-ca.certificate )))
instance_identity_key: ((( /cf/diego-instance-identity-intermediate-ca.private_key )))
rep:
use_azure_fault_domains: true
preloaded_rootfses: '(( .properties.enable_grootfs.value ? .properties.rep_preloaded_rootfses_grootfs.value : .properties.rep_preloaded_rootfses_garden.value ))'
placement_tags: [ (( placement_tag.value )) ]
enable_declarative_healthcheck: true
logging:
format:
timestamp: (( ..cf.properties.diego_log_timestamp_format.selected_option.parsed_manifest(timestamp_format) ))
loggregator:
use_v2_api: true
ca_cert: (( $ops_manager.ca_certificate ))
cert: (( .properties.loggregator_client_cert.cert_pem ))
key: (( .properties.loggregator_client_cert.private_key_pem ))
tls:
ca_cert: (( $ops_manager.ca_certificate ))
cert: (( .properties.rep_server_cert_v2.cert_pem ))
key: (( .properties.rep_server_cert_v2.private_key_pem ))
name: rep
release: diego
- consumes: |
nats: {from: nats, deployment: (( ..cf.deployment_name )) }
manifest: |
diego:
route_emitter:
bbs:
ca_cert: (( $ops_manager.ca_certificate ))
client_cert: (( .properties.bbs_client_cert.cert_pem ))
client_key: (( .properties.bbs_client_cert.private_key_pem ))
local_mode: true
internal_routes:
enabled: (( ..cf.properties.enable_service_discovery_for_apps.value ))
logging:
format:
timestamp: (( ..cf.properties.diego_log_timestamp_format.selected_option.parsed_manifest(timestamp_format) ))
loggregator:
ca_cert: (( $ops_manager.ca_certificate ))
cert: (( .properties.loggregator_client_cert.cert_pem ))
key: (( .properties.loggregator_client_cert.private_key_pem ))
use_v2_api: true
register_direct_instance_routes: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(use_container_address) ))
tcp:
enabled: true
uaa:
ca_cert: (( $ops_manager.ca_certificate ))
client_name: (( ..cf.uaa.tcp_emitter_credentials.identity ))
client_secret: (( ..cf.uaa.tcp_emitter_credentials.password ))
name: route_emitter
release: diego
- manifest: |
tls:
ca_certificate: (( $ops_manager.ca_certificate ))
certificate: (( .properties.bbs_client_cert.cert_pem ))
private_key: (( .properties.bbs_client_cert.private_key_pem ))
name: cfdot
release: diego
- manifest: |
garden:
cleanup_process_dirs_on_wait: true
debug_listen_address: 127.0.0.1:17019
deny_networks:
- 0.0.0.0/0
deprecated_use_garden_shed: '(( .properties.enable_grootfs.value ? false : true ))'
destroy_containers_on_start: true
graph_cleanup_threshold_in_mb: (( .properties.garden_disk_cleanup.selected_option.parsed_manifest(graph_cleanup_threshold_in_mb) ))
http_proxy: (( $ops_manager.http_proxy ))
https_proxy: (( $ops_manager.https_proxy ))
insecure_docker_registry_list: (( insecure_docker_registry_list.parsed_strings ))
network_plugin: /var/vcap/packages/runc-cni/bin/garden-external-networker
network_plugin_extra_args:
- --configFile=/var/vcap/jobs/garden-cni/config/adapter.json
no_proxy: (( $ops_manager.no_proxy ))
persistent_image_list: '(( .properties.enable_grootfs.value ? .properties.rep_preloaded_rootfses_grootfs.value : .properties.rep_preloaded_rootfses_garden.value ))'
grootfs:
graph_cleanup_threshold_in_mb: (( .properties.garden_disk_cleanup.selected_option.parsed_manifest(graph_cleanup_threshold_in_mb) ))
insecure_docker_registry_list: (( insecure_docker_registry_list.parsed_strings ))
reserved_space_for_other_jobs_in_mb: -1
tls:
ca_cert: (( $ops_manager.ca_certificate ))
cert: (( .properties.rep_server_cert.cert_pem ))
key: (( .properties.rep_server_cert.private_key_pem ))
name: garden
release: garden-runc
- manifest: |
cflinuxfs2-rootfs:
trusted_certs: |
(( $ops_manager.trusted_certificates ))
(( $ops_manager.ca_certificate ))
((( /cf/diego-instance-identity-root-ca.certificate )))
name: cflinuxfs2-rootfs-setup
release: cflinuxfs2
- manifest: |
loggregator:
tls:
ca_cert: (( $ops_manager.ca_certificate ))
metron:
cert: (( .properties.metron_tls_cert.cert_pem ))
key: (( .properties.metron_tls_cert.private_key_pem ))
name: metron_agent
release: loggregator
- consumes: |
cf_network: { from: cf_network, deployment: (( ..cf.deployment_name )) }
manifest: |
disable: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(disable_silk) ))
dns_servers: '(( $director.dns_release_present ? ..cf.properties.bosh_dns_servers.parsed_strings : ..cf.properties.container_networking_interface_plugin.silk.dns_servers.parsed_strings ))'
mtu: (( ..cf.properties.container_networking_interface_plugin.silk.network_mtu.value ))
iptables_logging: (( ..cf.properties.container_networking_interface_plugin.silk.enable_log_traffic.value ))
iptables_accepted_udp_logs_per_sec: (( ..cf.properties.container_networking_interface_plugin.silk.iptables_accepted_udp_logs_per_sec.value ))
iptables_denied_logs_per_sec: (( ..cf.properties.container_networking_interface_plugin.silk.iptables_denied_logs_per_sec.value ))
name: silk-cni
release: silk
- consumes: |
cf_network: {from: cf_network, deployment: (( ..cf.deployment_name )) }
manifest: |
ca_cert: (( $ops_manager.ca_certificate ))
client_cert: (( ..cf.diego_database.silk_daemon_client_cert.cert_pem ))
client_key: (( ..cf.diego_database.silk_daemon_client_cert.private_key_pem ))
disable: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(disable_silk) ))
vtep_port: (( ..cf.properties.container_networking_interface_plugin.silk.vtep_port.value ))
name: silk-daemon
release: silk
- manifest: |
cni_config_dir: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(cni_config_dir) ))
cni_plugin_dir: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(cni_plugin_dir) ))
search_domains: (( .properties.cf_networking_search_domains.parsed_strings ))
name: garden-cni
release: cf-networking
- manifest: |
disable: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(disable_silk) ))
name: netmon
release: silk
- manifest: |
ca_cert: (( $ops_manager.ca_certificate ))
client_cert: (( ..cf.properties.network_policy_agent_cert.cert_pem ))
client_key: (( ..cf.properties.network_policy_agent_cert.private_key_pem ))
disable: (( ..cf.properties.container_networking_interface_plugin.selected_option.parsed_manifest(disable_silk) ))
iptables_accepted_udp_logs_per_sec: (( ..cf.properties.container_networking_interface_plugin.silk.iptables_accepted_udp_logs_per_sec.value ))
iptables_logging: (( ..cf.properties.container_networking_interface_plugin.silk.enable_log_traffic.value ))
policy_server:
hostname: network-policy-server.service.cf.internal
name: vxlan-policy-agent
release: silk
- manifest: |
nfsv3driver: (( .properties.nfs_volume_driver.selected_option.parsed_manifest(nfsv3driver_properties) ))
name: nfsv3driver
release: nfs-volume
- manifest: |
syslog: (( .properties.system_logging.selected_option.parsed_manifest(syslog) ))
name: syslog_forwarder
release: syslog
- consumes: |
service-discovery-controller: {from: service-discovery-controller, deployment: (( ..cf.deployment_name )) }
manifest: |
cf_app_sd_disable: "(( ..cf.properties.enable_service_discovery_for_apps.value ? false : true ))"
dnshttps:
client:
tls:
certificate: (( .properties.service_discovery_client_tls.cert_pem ))
private_key: (( .properties.service_discovery_client_tls.private_key_pem ))
server:
ca: (( $ops_manager.ca_certificate ))
name: bosh-dns-adapter
release: cf-networking
- name: iptables-logger
release: silk
kiln_metadata:
metadata_git_sha: c839fc2b06ad4b6de68a7587cc915d36d5027f5c
label: PCF Isolation Segment
metadata_version: "2.2"
minimum_version_for_upgrade: 2.1.0
name: p-isolation-segment
post_deploy_errands: []
product_version: 2.2.4
property_blueprints:
- configurable: false
name: bbs_client_cert
type: rsa_cert_credentials
- configurable: true
name: cf_networking_search_domains
optional: true
type: string_list
- configurable: true
default: enable
name: container_networking
option_templates:
- name: enable
select_value: enable
- name: disable
select_value: disable
type: selector
- configurable: true
default: true
name: enable_grootfs
type: boolean
- configurable: true
default: threshold
name: garden_disk_cleanup
option_templates:
- name: never
named_manifests:
- manifest: |
-1
name: graph_cleanup_threshold_in_mb
select_value: never
- name: routine
named_manifests:
- manifest: |
0
name: graph_cleanup_threshold_in_mb
select_value: routine
- name: threshold
named_manifests:
- manifest: |
(( .properties.garden_disk_cleanup.threshold.cleanup_threshold_in_mb.value ))
name: graph_cleanup_threshold_in_mb
property_blueprints:
- constraints:
min: 1
default: 10240
name: cleanup_threshold_in_mb
type: integer
select_value: threshold
type: selector
- configurable: true
default: ECDHE-RSA-AES128-GCM-SHA256:TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
name: gorouter_ssl_ciphers
type: text
- configurable: true
default: none
name: haproxy_client_cert_validation
option_templates:
- name: none
named_manifests:
- manifest: |
false
name: client_cert_validation
select_value: none
- name: request
named_manifests:
- manifest: |
true
name: client_cert_validation
select_value: request
type: selector
- configurable: false
name: haproxy_client_certificate
type: rsa_cert_credentials
- configurable: true
default: enable
name: haproxy_forward_tls
option_templates:
- name: enable
named_manifests:
- manifest: |
443
name: backend_port
- manifest: |
"verify"
name: backend_ssl
property_blueprints:
- configurable: true
name: backend_ca
type: text
select_value: enable
- name: disable
named_manifests:
- manifest: |
80
name: backend_port
- manifest: |
"off"
name: backend_ssl
select_value: disable
type: selector
- configurable: true
default: disable
name: haproxy_hsts_support
option_templates:
- name: enable
named_manifests:
- manifest: |
true
name: hsts_enable
- manifest: |
(( .properties.haproxy_hsts_support.enable.include_subdomains.value ))
name: hsts_include_subdomains
- manifest: |
(( .properties.haproxy_hsts_support.enable.max_age.value ))
name: hsts_max_age
- manifest: |
(( .properties.haproxy_hsts_support.enable.enable_preload.value ))
name: hsts_preload
property_blueprints:
- configurable: true
default: 31536000
name: max_age
type: integer
- configurable: true
default: false
name: include_subdomains
type: boolean
- configurable: true
default: false
name: enable_preload
type: boolean
select_value: enable
- name: disable
named_manifests:
- manifest: |
false
name: hsts_enable
- manifest: |
false
name: hsts_include_subdomains
- manifest: |
0
name: hsts_max_age
- manifest: |
false
name: hsts_preload
select_value: disable
type: selector
- configurable: true
default: 16384
name: haproxy_max_buffer_size
type: integer
- configurable: true
default: DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384
name: haproxy_ssl_ciphers
type: text
- configurable: false
name: loggregator_client_cert
type: rsa_cert_credentials
- configurable: false
default:
domains:
- metron
name: metron_tls_cert
type: rsa_cert_credentials
- configurable: true
name: networking_poe_ssl_certs
named_manifests:
- manifest: |
cert_chain: (( current_record.certificate.cert_pem ))
private_key: (( current_record.certificate.private_key_pem ))
name: routing_certificates_and_private_keys
property_blueprints:
- configurable: true
name: name
type: string
- configurable: true
name: certificate
type: rsa_cert_credentials
type: collection
- configurable: true
default: terminate_at_router
name: networking_point_of_entry
option_templates:
- name: terminate_at_router
select_value: terminate_at_router
- name: terminate_before_router
select_value: terminate_before_router
type: selector
- configurable: true
default: enable
name: nfs_volume_driver
option_templates:
- name: enable
named_manifests:
- manifest: |
disable: false
ldap_svc_user: (( .properties.nfs_volume_driver.enable.ldap_service_account_user.value ))
ldap_svc_password: (( .properties.nfs_volume_driver.enable.ldap_service_account_password.value ))
ldap_host: (( .properties.nfs_volume_driver.enable.ldap_server_host.value ))
ldap_port: (( .properties.nfs_volume_driver.enable.ldap_server_port.value ))
ldap_user_fqdn: (( .properties.nfs_volume_driver.enable.ldap_user_fqdn.value ))
name: nfsv3driver_properties
property_blueprints:
- configurable: true
name: ldap_service_account_user
optional: true
type: string
- configurable: true
name: ldap_service_account_password
optional: true
type: secret
- configurable: true
name: ldap_server_host
optional: true
type: string
- configurable: true
name: ldap_server_port
optional: true
type: integer
- configurable: true
name: ldap_user_fqdn
optional: true
type: string
select_value: enable
- name: disable
named_manifests:
- manifest: |
disable: true
name: nfsv3driver_properties
select_value: disable
type: selector
- configurable: false
default:
- cflinuxfs2:/var/vcap/packages/cflinuxfs2/rootfs
name: rep_preloaded_rootfses_garden
type: string_list
- configurable: false
default:
- cflinuxfs2:/var/vcap/packages/cflinuxfs2/rootfs.tar
name: rep_preloaded_rootfses_grootfs
type: string_list
- configurable: true
default: false
name: rep_proxy_enabled
type: boolean
- configurable: false
default:
domains:
- cell.service.cf.internal
- '*.cell.service.cf.internal'
name: rep_server_cert
type: rsa_cert_credentials
- configurable: false
default:
domains:
- '*.cell.service.cf.internal'
- cell.service.cf.internal
- 127.0.0.1
- localhost
name: rep_server_cert_v2
type: rsa_cert_credentials
- configurable: true
default: 500
name: router_backend_max_conn
type: integer
- configurable: true
default: request
name: router_client_cert_validation
option_templates:
- name: none
named_manifests:
- manifest: |
none
name: client_cert_validation
select_value: none
- name: request
named_manifests:
- manifest: |
request
name: client_cert_validation
select_value: request
- name: require
named_manifests:
- manifest: |
require
name: client_cert_validation
select_value: require
type: selector
- configurable: true
default: false
name: router_enable_proxy
type: boolean
- configurable: true
default: enable
name: router_keepalive_connections
option_templates:
- name: enable
named_manifests:
- manifest: |
49000
name: max_idle_connections
select_value: enable
- name: disable
named_manifests:
- manifest: |
0
name: max_idle_connections
select_value: disable
type: selector
- configurable: false
name: routing_backends_client_cert
type: rsa_cert_credentials
- configurable: true
name: routing_custom_ca_certificates
optional: true
type: text
- configurable: true
default: false
name: routing_disable_http
type: boolean
- configurable: true
default: log_client_ips
name: routing_log_client_ips
option_templates:
- name: log_client_ips
named_manifests:
- manifest: |
false
name: disable_log_forwarded_for
- manifest: |
false
name: disable_log_source_ips
select_value: log_client_ips
- name: disable_x_forwarded_for
named_manifests:
- manifest: |
true
name: disable_log_forwarded_for
- manifest: |
false
name: disable_log_source_ips
select_value: disable_x_forwarded_for
- name: disable_all_log_client_ips
named_manifests:
- manifest: |
true
name: disable_log_forwarded_for
- manifest: |
true
name: disable_log_source_ips
select_value: disable_all_log_client_ips
type: selector
- configurable: true
default: tls_v1_2
name: routing_minimum_tls_version
option_templates:
- name: tls_v1_0
named_manifests:
- manifest: |
TLSv1.0
name: min_tls_version
- manifest: |
false
name: disable_tls_10
- manifest: |
false
name: disable_tls_11
select_value: tls_v1_0
- name: tls_v1_1
named_manifests:
- manifest: |
TLSv1.1
name: min_tls_version
- manifest: |
true
name: disable_tls_10
- manifest: |
false
name: disable_tls_11
select_value: tls_v1_1
- name: tls_v1_2
named_manifests:
- manifest: |
TLSv1.2
name: min_tls_version
- manifest: |
true
name: disable_tls_10
- manifest: |
true
name: disable_tls_11
select_value: tls_v1_2
type: selector
- configurable: true
default: isolation_segment_only
name: routing_table_sharding_mode
option_templates:
- name: isolation_segment_only
named_manifests:
- manifest: |
segments
name: sharding_mode
- manifest: |
['(( .isolated_diego_cell.placement_tag.value ))']
name: isolation_segments
- manifest: |
false
name: routing_api_enabled
select_value: isolation_segment_only
- name: no_isolation_segment
named_manifests:
- manifest: |
shared-and-segments
name: sharding_mode
- manifest: |
[]
name: isolation_segments
- manifest: |
true
name: routing_api_enabled
select_value: no_isolation_segment
type: selector
- configurable: true
default: load_balancer
name: routing_tls_termination
option_templates:
- name: load_balancer
named_manifests:
- manifest: |
always_forward
name: gorouter_forwarded_client_cert
select_value: load_balancer
- name: ha_proxy
named_manifests:
- manifest: |
forward
name: gorouter_forwarded_client_cert
select_value: ha_proxy
- name: router
named_manifests:
- manifest: |
sanitize_set
name: gorouter_forwarded_client_cert
select_value: router
type: selector
- configurable: false
default:
domains:
- service-discovery-controller.service.cf.internal
name: service_discovery_client_tls
type: rsa_cert_credentials
- configurable: true
name: skip_cert_verify
type: boolean
- configurable: true
default: disabled
name: system_logging
option_templates:
- name: disabled
named_manifests:
- manifest: |
migration:
disabled: true
name: syslog
- manifest: ""
name: syslog_url
select_value: disabled
- name: enabled
named_manifests:
- manifest: |
address: (( .properties.system_logging.enabled.host.value ))
port: (( .properties.system_logging.enabled.port.value ))
transport: (( .properties.system_logging.enabled.protocol.value ))
tls_enabled: (( .properties.system_logging.enabled.tls_enabled.value ))
ca_cert: (( .properties.system_logging.enabled.tls_ca_cert.value ))
permitted_peer: (( .properties.system_logging.enabled.tls_permitted_peer.value ))
use_tcp_for_file_forwarding_local_transport: (( .properties.system_logging.enabled.use_tcp_for_file_forwarding_local_transport.value ))
custom_rule: |
(( .properties.system_logging.enabled.syslog_rule.value ))
if ($programname startswith "vcap.") then stop
"(( ..cf.properties.container_networking_interface_plugin.silk.enable_log_traffic.value ? .properties.system_logging.selected_option.parsed_manifest(iptables_logging_enabled) : .properties.system_logging.selected_option.parsed_manifest(iptables_logging_disabled) ))"
name: syslog
- manifest: |
if $programname == 'kernel' and ($msg contains "DENY_" or $msg contains "OK_") then {
-/var/log/kern.log
stop
}
name: iptables_logging_enabled
- manifest: |
""
name: iptables_logging_disabled
property_blueprints:
- configurable: true
name: host
type: network_address
- configurable: true
name: port
type: port
- configurable: true
name: protocol
options:
- label: TCP protocol
name: tcp
- label: RELP protocol
name: relp
- label: UDP protocol
name: udp
type: dropdown_select
- configurable: true
default: false
name: tls_enabled
optional: true
type: boolean
- configurable: true
name: tls_permitted_peer
optional: true
type: string
- configurable: true
name: tls_ca_cert
optional: true
type: ca_certificate
- configurable: true
default: false
name: use_tcp_for_file_forwarding_local_transport
type: boolean
- configurable: true
name: syslog_rule
optional: true
type: text
select_value: enabled
type: selector
provides_product_versions:
- name: p-isolation-segment
version: 2.2.4
rank: 89
releases:
- file: cf-networking-2.3.0-3586.27.0.tgz
name: cf-networking
sha1: aac4a8fd00bc1f772c2f7176dfcfc11d1d1154e9
version: 2.3.0
- file: cflinuxfs2-1.228.0-3586.27.0.tgz
name: cflinuxfs2
sha1: 70903272eed6efd881e010872d6acae1ce8df794
version: 1.228.0
- file: consul-195.0.0-3586.27.0.tgz
name: consul
sha1: 4e65199935fbc201abe8a8ac6e23e7780acc3170
version: "195"
- file: diego-2.8.2-3586.27.0.tgz
name: diego
sha1: f2853efe9805c835dfe4848faffb7895df469e0c
version: 2.8.2
- file: garden-runc-1.13.3-3586.27.0.tgz
name: garden-runc
sha1: 8621c42d248395d4f3353f6b1495b1312028ca3e
version: 1.13.3
- file: haproxy-8.7.0-3586.27.0.tgz
name: haproxy
sha1: 69d1a8fff8bb0ff52535a0350237edd1439c9a71
version: 8.7.0
- file: loggregator-102.4.0-3586.27.0.tgz
name: loggregator
sha1: 4efb91dbae79d0d81ca5e5f3ac696737e27a6b29
version: "102.4"
- file: nfs-volume-1.2.2-3586.27.0.tgz
name: nfs-volume
sha1: bb0f07becf7640f7e7d94374721d3281973d3de0
version: 1.2.2
- file: routing-0.178.2-3586.27.0.tgz
name: routing
sha1: 2ea9e434dd96fa87d7d09f7f44426257ee2d26a3
version: 0.178.2
- file: silk-2.3.0-3586.27.0.tgz
name: silk
sha1: f51667fc3d2389060d9f3c2cbea2625f2d8a0ae3
version: 2.3.0
- file: syslog-11.3.2-3586.27.0.tgz
name: syslog
sha1: 737e66d64a916aa122133dd8565875ce4e89d8f3
version: 11.3.2
requires_product_versions:
- name: cf
version: ~> 2.2.0
serial: false
stemcell_criteria:
os: ubuntu-trusty
version: "3586.27"
variables:
- name: router-route-services-secret
type: password
| {
"pile_set_name": "Github"
} |
mode.id=processing.mode.java.JavaMode
mode=Java
| {
"pile_set_name": "Github"
} |
--------------------------------------------------------------------------
-- Lmod License
--------------------------------------------------------------------------
--
-- Lmod is licensed under the terms of the MIT license reproduced below.
-- This means that Lmod is free software and can be used for both academic
-- and commercial purposes at absolutely no cost.
--
-- ----------------------------------------------------------------------
--
-- Copyright (C) 2008-2018 Robert McLay
--
-- Permission is hereby granted, free of charge, to any person obtaining
-- a copy of this software and associated documentation files (the
-- "Software"), to deal in the Software without restriction, including
-- without limitation the rights to use, copy, modify, merge, publish,
-- distribute, sublicense, and/or sell copies of the Software, and to
-- permit persons to whom the Software is furnished to do so, subject
-- to the following conditions:
--
-- The above copyright notice and this permission notice shall be
-- included in all copies or substantial portions of the Software.
--
-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-- BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-- ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-- CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-- THE SOFTWARE.
--
--------------------------------------------------------------------------
--------------------------------------------------------------------------
-- Bash: This is a derived class from BaseShell. This class knows how
-- to expand the environment variable into bash syntax.
require("strict")
local BaseShell = require("BaseShell")
local Bash = inheritsFrom(BaseShell)
local dbg = require("Dbg"):dbg()
local Var = require("Var")
local concatTbl = table.concat
local stdout = io.stdout
Bash.my_name = "bash"
--------------------------------------------------------------------------
-- Bash:alias(): Either define or undefine a bash shell alias.
-- Modify module definition of function so that there is
-- one and only one semicolon at the end.
function Bash.alias(self, k, v)
   if (not v) then
      -- Undefine: ignore errors from unaliasing a name that was never set.
      local line = "unalias " .. k .. " 2> /dev/null || true;\n"
      stdout:write(line)
      dbg.print{ line}
   else
      -- Strip any trailing semicolon from the value, then escape it for bash,
      -- so the emitted alias ends with exactly one semicolon.
      local escaped = v:gsub(";%s*$",""):multiEscaped()
      local line = "alias " .. k .. "=" .. escaped .. ";\n"
      stdout:write(line)
      dbg.print{ line}
   end
end
--------------------------------------------------------------------------
-- Bash:shellFunc(): Either define or undefine a bash shell function.
-- Modify module definition of function so that there is
-- one and only one semicolon at the end.
function Bash.shellFunc(self, k, v)
   if (not v) then
      -- Undefine: ignore errors if the shell function does not exist.
      local line = "unset -f " .. k .. " 2> /dev/null || true;\n"
      stdout:write(line)
      dbg.print{ line}
   else
      -- v[1] holds the bash definition; drop any trailing semicolon so the
      -- emitted body ends with exactly one "; };".
      local body = v[1]:gsub(";%s*$","")
      local line = k .. " () { " .. body .. "; };\n"
      stdout:write(line)
      dbg.print{ line}
   end
end
--------------------------------------------------------------------------
-- Bash:expandVar(): Define either a global or local variable in bash
-- syntax
function Bash.expandVar(self, k, v, vType)
   -- Names containing a dot cannot be expressed as bash variables; skip them.
   if (k:find("%.")) then
      return
   end

   -- Build "k=v;\n" (value escaped for bash) plus an export for global vars.
   local line = k .. "=" .. tostring(v):multiEscaped() .. ";\n"
   if (vType ~= "local_var") then
      line = line .. "export " .. k .. ";\n"
   end

   stdout:write(line)
   -- Do not echo _ModuleTable* variables to the debug log.
   if (k:find('^_ModuleTable') == nil) then
      dbg.print{ line}
   end
end
--------------------------------------------------------------------------
-- Bash:unset() unset an environment variable.
function Bash.unset(self, k, vType)
   -- Names containing a dot cannot be expressed as bash variables; skip them.
   if (k:find("%.")) then
      return
   end
   local line = "unset " .. k .. ";\n"
   stdout:write(line)
   dbg.print{ line}
end
--------------------------------------------------------------------------
-- Bash:real_shell(): Return true if the output shell is "real" or not.
-- This base function returns false. Bash, Csh
-- and Fish should return true.
-- Bash is a "real" shell, so this override always reports true.
function Bash.real_shell(self)
   return true
end
return Bash
| {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace MineCase.Engine
{
    /// <summary>
    /// Event aggregator interface: a publish/subscribe hub that decouples
    /// message publishers from their subscribers.
    /// </summary>
    public interface IEventAggregator
    {
        /// <summary>
        /// Subscribes an object to events.
        /// </summary>
        /// <param name="subscriber">The subscribing object.</param>
        void Subscribe(object subscriber);

        /// <summary>
        /// Removes a previously registered subscription.
        /// </summary>
        /// <param name="subscriber">The subscribing object.</param>
        void Unsubscribe(object subscriber);

        /// <summary>
        /// Publishes a message to subscribers.
        /// </summary>
        /// <param name="message">The message instance.</param>
        /// <param name="marshal">Delegate that wraps each delivery action
        /// (the implementation decides how the wrapped action is invoked).</param>
        void Publish(object message, Action<Action> marshal);
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Generated by class-dump 3.3.4 (64 bit).
*
* class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2011 by Steve Nygard.
*/
#import <XDBase/XDUMLPackageImp.h>
// Declarations recovered by class-dump; no implementation is visible here.
@interface XDModelPackage : XDUMLPackageImp
{
}

// NOTE(review): presumably the package's plain and fully-qualified names —
// confirm against the XDUMLPackageImp base class.
- (id)name;
- (id)qualifiedName;
@end
| {
"pile_set_name": "Github"
} |
//---------------------------------------------------------------------------//
// Copyright (c) 2013 Kyle Lutz <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#ifndef BOOST_COMPUTE_DETAIL_PRINT_RANGE_HPP
#define BOOST_COMPUTE_DETAIL_PRINT_RANGE_HPP
#include <vector>
#include <iostream>
#include <iterator>
#include <boost/compute/algorithm/copy.hpp>
#include <boost/compute/container/vector.hpp>
#include <boost/compute/detail/is_buffer_iterator.hpp>
#include <boost/compute/detail/iterator_range_size.hpp>
namespace boost {
namespace compute {
namespace detail {
// Overload for buffer (device) iterators: values are copied to the host
// before printing.
template<class InputIterator>
inline void print_range(InputIterator first,
                        InputIterator last,
                        command_queue &queue,
                        typename boost::enable_if<
                            is_buffer_iterator<InputIterator>
                        >::type* = 0)
{
    typedef typename
        std::iterator_traits<InputIterator>::value_type
        value_type;

    const size_t count = iterator_range_size(first, last);

    // transfer the device values into a host-side buffer
    std::vector<value_type> host_values(count);
    ::boost::compute::copy(first, last, host_values.begin(), queue);

    // emit the values in "[ a, b, c ]" form
    std::cout << "[ ";
    for(size_t i = 0; i < count; i++){
        if(i != 0){
            std::cout << ", ";
        }
        std::cout << host_values[i];
    }
    std::cout << " ]" << std::endl;
}
template<class InputIterator>
inline void print_range(InputIterator first,
InputIterator last,
command_queue &queue,
typename boost::enable_if_c<
!is_buffer_iterator<InputIterator>::value
>::type* = 0)
{
typedef typename
std::iterator_traits<InputIterator>::value_type
value_type;
const context &context = queue.get_context();
const size_t size = iterator_range_size(first, last);
// copy values to temporary vector on the device
::boost::compute::vector<value_type> tmp(size, context);
::boost::compute::copy(first, last, tmp.begin(), queue);
print_range(tmp.begin(), tmp.end(), queue);
}
} // end detail namespace
} // end compute namespace
} // end boost namespace
#endif // BOOST_COMPUTE_DETAIL_PRINT_RANGE_HPP
| {
"pile_set_name": "Github"
} |
/*
* reserved comment block
* DO NOT REMOVE OR ALTER!
*/
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sun.org.apache.xml.internal.security.keys.content.keyvalues;
import java.math.BigInteger;
import java.security.Key;
import java.security.KeyFactory;
import java.security.NoSuchAlgorithmException;
import java.security.PublicKey;
import java.security.interfaces.RSAPublicKey;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.RSAPublicKeySpec;
import com.sun.org.apache.xml.internal.security.exceptions.XMLSecurityException;
import com.sun.org.apache.xml.internal.security.utils.Constants;
import com.sun.org.apache.xml.internal.security.utils.I18n;
import com.sun.org.apache.xml.internal.security.utils.SignatureElementProxy;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
/**
 * XML representation of an RSA public key: the {@code ds:RSAKeyValue}
 * element with {@code Modulus} and {@code Exponent} children, plus
 * conversion back to a {@link java.security.PublicKey}.
 */
public class RSAKeyValue extends SignatureElementProxy implements KeyValueContent {

    /**
     * Constructor RSAKeyValue
     *
     * @param element existing {@code ds:RSAKeyValue} DOM element to wrap
     * @param baseURI base URI used for resolving relative references
     * @throws XMLSecurityException propagated from the element proxy base class
     */
    public RSAKeyValue(Element element, String baseURI) throws XMLSecurityException {
        super(element, baseURI);
    }

    /**
     * Constructor RSAKeyValue
     *
     * @param doc owner document used to create the new element
     * @param modulus RSA modulus, stored in the {@code Modulus} child
     * @param exponent RSA public exponent, stored in the {@code Exponent} child
     */
    public RSAKeyValue(Document doc, BigInteger modulus, BigInteger exponent) {
        super(doc);

        addReturnToSelf();
        this.addBigIntegerElement(modulus, Constants._TAG_MODULUS);
        this.addBigIntegerElement(exponent, Constants._TAG_EXPONENT);
    }

    /**
     * Constructor RSAKeyValue
     *
     * @param doc owner document used to create the new element
     * @param key key to encode; must be an {@link RSAPublicKey}
     * @throws IllegalArgumentException if {@code key} is not an RSA public key
     */
    public RSAKeyValue(Document doc, Key key) throws IllegalArgumentException {
        super(doc);

        addReturnToSelf();
        if (key instanceof RSAPublicKey ) {
            this.addBigIntegerElement(
                ((RSAPublicKey) key).getModulus(), Constants._TAG_MODULUS
            );
            this.addBigIntegerElement(
                ((RSAPublicKey) key).getPublicExponent(), Constants._TAG_EXPONENT
            );
        } else {
            Object exArgs[] = { Constants._TAG_RSAKEYVALUE, key.getClass().getName() };
            throw new IllegalArgumentException(I18n.translate("KeyValue.IllegalArgument", exArgs));
        }
    }

    /**
     * {@inheritDoc}
     *
     * Reads the {@code Modulus} and {@code Exponent} child elements and
     * rebuilds the RSA public key through a {@link KeyFactory}.
     */
    public PublicKey getPublicKey() throws XMLSecurityException {
        try {
            KeyFactory rsaFactory = KeyFactory.getInstance("RSA");

            RSAPublicKeySpec rsaKeyspec =
                new RSAPublicKeySpec(
                    this.getBigIntegerFromChildElement(
                        Constants._TAG_MODULUS, Constants.SignatureSpecNS
                    ),
                    this.getBigIntegerFromChildElement(
                        Constants._TAG_EXPONENT, Constants.SignatureSpecNS
                    )
                );
            PublicKey pk = rsaFactory.generatePublic(rsaKeyspec);

            return pk;
        } catch (NoSuchAlgorithmException ex) {
            throw new XMLSecurityException(ex);
        } catch (InvalidKeySpecException ex) {
            throw new XMLSecurityException(ex);
        }
    }

    /** {@inheritDoc} */
    public String getBaseLocalName() {
        return Constants._TAG_RSAKEYVALUE;
    }
}
| {
"pile_set_name": "Github"
} |
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Cimarron D. Taylor of the University of California, Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)find.h 8.1 (Berkeley) 6/6/93
*/
/* node type */
enum ntype {
	N_AND = 1, 				/* must start > 0 */
	N_ATIME, N_CLOSEPAREN, N_CTIME, N_DEPTH, N_EXEC, N_EXPR, N_FOLLOW,
	N_FSTYPE, N_GROUP, N_INUM, N_LINKS, N_LS, N_MTIME, N_NAME, N_NEWER,
	N_NOGROUP, N_NOT, N_NOUSER, N_OK, N_OPENPAREN, N_OR, N_PATH,
	N_PERM, N_PRINT, N_PRUNE, N_SIZE, N_TYPE, N_USER, N_XDEV,
};

/*
 * node definition: one node of the parsed find(1) expression.  Each node
 * carries an evaluation callback, per-primary flags, and a payload union
 * whose active member depends on `type'.
 */
typedef struct _plandata {
	struct _plandata *next;			/* next node */
	int (*eval)				/* node evaluation function */
	    __P((struct _plandata *, FTSENT *));
#define	F_EQUAL		1			/* [acm]time inum links size */
#define	F_LESSTHAN	2
#define	F_GREATER	3
#define	F_NEEDOK	1			/* exec ok */
#define	F_MTFLAG	1			/* fstype */
#define	F_MTTYPE	2
#define	F_ATLEAST	1			/* perm */
	int flags;				/* private flags */
	enum ntype type;			/* plan node type */
	union {
		gid_t _g_data;			/* gid */
		ino_t _i_data;			/* inode */
		mode_t _m_data;			/* mode mask */
		nlink_t _l_data;		/* link count */
		off_t _o_data;			/* file size */
		time_t _t_data;			/* time value */
		uid_t _u_data;			/* uid */
		short _mt_data;			/* mount flags */
		struct _plandata *_p_data[2];	/* PLAN trees */
		struct _ex {
			char **_e_argv;		/* argv array */
			char **_e_orig;		/* original strings */
			int *_e_len;		/* allocated length */
		} ex;
		char *_a_data[2];		/* array of char pointers */
		char *_c_data;			/* char pointer */
	} p_un;
} PLAN;

/* Shorthand accessors for the members of p_un. */
#define	a_data	p_un._a_data
#define	c_data	p_un._c_data
#define	i_data	p_un._i_data
#define	g_data	p_un._g_data
#define	l_data	p_un._l_data
#define	m_data	p_un._m_data
#define	mt_data	p_un._mt_data
#define	o_data	p_un._o_data
#define	p_data	p_un._p_data
#define	t_data	p_un._t_data
#define	u_data	p_un._u_data
#define	e_argv	p_un.ex._e_argv
#define	e_orig	p_un.ex._e_orig
#define	e_len	p_un.ex._e_len

/* Table entry mapping a primary's name to its parse-time constructor. */
typedef struct _option {
	char *name;			/* option name */
	enum ntype token;		/* token type */
	PLAN *(*create)();		/* create function: DON'T PROTOTYPE! */
#define	O_NONE		0x01		/* no call required */
#define	O_ZERO		0x02		/* pass: nothing */
#define	O_ARGV		0x04		/* pass: argv, increment argv */
#define	O_ARGVP		0x08		/* pass: *argv, N_OK || N_EXEC */
	int flags;
} OPTION;
#include "extern.h"
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.nifi.processors.kite;
import com.google.common.base.Splitter;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import java.util.Map;
/**
 * Collects failure reasons while work is processed and renders them as a
 * compact summary: one example message per distinct problem, annotated with
 * the number of similar failures.
 */
class FailureTracker {

    // Splits "problem: detail" reasons on the first ':' only.
    private static final Splitter REASON_SEPARATOR = Splitter.on(':').limit(2);

    // problem -> first full reason recorded for it (insertion order kept)
    private final Map<String, String> examples = Maps.newLinkedHashMap();

    // problem -> how many times it occurred
    private final Map<String, Integer> occurrences = Maps.newLinkedHashMap();

    long count = 0L;

    /** Records a failure described by a throwable; its message chain becomes the reason. */
    public void add(Throwable throwable) {
        add(reason(throwable));
    }

    /** Records a failure described by a "problem: detail" reason string. */
    public void add(String reason) {
        count += 1;
        String problem = Iterators.getNext(REASON_SEPARATOR.split(reason).iterator(), "Unknown");
        Integer seen = occurrences.get(problem);
        if (seen == null) {
            examples.put(problem, reason);
            occurrences.put(problem, 1);
        } else {
            occurrences.put(problem, seen + 1);
        }
    }

    /** @return total number of failures recorded so far */
    public long count() {
        return count;
    }

    /** Renders one example per problem, e.g. "bad record: x (2 similar failures)". */
    public String summary() {
        StringBuilder sb = new StringBuilder();
        String separator = "";
        for (Map.Entry<String, String> entry : examples.entrySet()) {
            sb.append(separator).append(entry.getValue());
            separator = ", ";
            int similar = occurrences.get(entry.getKey()) - 1;
            if (similar == 1) {
                sb.append(" (1 similar failure)");
            } else if (similar > 1) {
                sb.append(" (").append(similar).append(" similar failures)");
            }
        }
        return sb.toString();
    }

    // Flattens a cause chain into "message: causeMessage: ...".
    private static String reason(Throwable t) {
        StringBuilder sb = new StringBuilder();
        for (Throwable current = t; current != null; current = current.getCause()) {
            if (current != t) {
                sb.append(": ");
            }
            sb.append(current.getMessage());
        }
        return sb.toString();
    }
}
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"filename" : "Profile_manIcon.png",
"scale" : "1x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
{
"description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
"properties": {
"continue": {
"description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.",
"type": [
"string",
"null"
]
},
"resourceVersion": {
"description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
"type": [
"string",
"null"
]
},
"selfLink": {
"description": "selfLink is a URL representing this object. Populated by the system. Read-only.",
"type": [
"string",
"null"
]
}
},
"$schema": "http://json-schema.org/schema#",
"type": "object"
} | {
"pile_set_name": "Github"
} |
var extend = require('util')._extend
function constructObject(initialObject) {
initialObject = initialObject || {}
return {
extend: function (object) {
return constructObject(extend(initialObject, object))
},
done: function () {
return initialObject
}
}
}
// Normalizes (uri, options) into a single options object.  A string uri
// becomes {uri: uri}; an object uri is merged as-is; `options` wins last.
// The discarded extend() return values are fine: extend mutates in place.
function constructOptionsFrom(uri, options) {
  var params = constructObject()

  if (typeof uri === 'string') params.extend({uri: uri})
  else if (typeof uri === 'object') params.extend(uri)

  params.extend(options)
  return params.done()
}
// Returns the first function found in `values`, or undefined if none.
function filterForCallback(values) {
  for (var i = 0; i < values.length; i++) {
    if (typeof values[i] === 'function') return values[i]
  }
}
// True when `value` is callable.
function isFunction(value) {
  return typeof value === 'function'
}
// Truthy when the request described by `params.options` carries a body:
// an explicit body/stream, a non-boolean `json` payload, or multipart parts.
// (A boolean `json` only toggles JSON handling; it is not itself a body.)
function paramsHaveRequestBody(params) {
  var options = params.options
  return (
    options.body ||
    options.requestBodyStream ||
    (options.json && typeof options.json !== 'boolean') ||
    options.multipart
  )
}
// Public helper API re-exported for the rest of the package.
exports.isFunction = isFunction
exports.constructObject = constructObject
exports.constructOptionsFrom = constructOptionsFrom
exports.filterForCallback = filterForCallback
exports.paramsHaveRequestBody = paramsHaveRequestBody
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
syntax = "proto2";
package pulsar.proto;
option java_package = "org.apache.pulsar.common.api.proto";
option optimize_for = LITE_RUNTIME;
enum MarkerType {
    UNKNOWN_MARKER = 0;

    // Replicated subscription markers
    REPLICATED_SUBSCRIPTION_SNAPSHOT_REQUEST = 10;
    REPLICATED_SUBSCRIPTION_SNAPSHOT_RESPONSE = 11;
    REPLICATED_SUBSCRIPTION_SNAPSHOT = 12;
    REPLICATED_SUBSCRIPTION_UPDATE = 13;

    // Next markers start at 20
    // Transaction markers
    TXN_COMMITTING = 20;
    TXN_COMMIT = 21;
    TXN_ABORT = 22;
}
/// --- Replicated subscriptions ---
// A cluster uses this message to request the current
// message id from all the other clusters.
message ReplicatedSubscriptionsSnapshotRequest {
required string snapshot_id = 1;
optional string source_cluster = 2;
}
// When a cluster receives the snapshot request, it replies
// by sending back the response (only to original asking cluster)
message ReplicatedSubscriptionsSnapshotResponse {
required string snapshot_id = 1;
optional ClusterMessageId cluster = 2;
}
// This message is used to store the snapshot in the
// local topic. It's not meant to be replicated to other
// clusters
message ReplicatedSubscriptionsSnapshot {
required string snapshot_id = 1;
optional MessageIdData local_message_id = 2;
repeated ClusterMessageId clusters = 3;
}
// When the replicated subscription mark-delete position
// is updated in the source cluster, this message will be
// sent to all clusters to updated the mirrored subscriptions
message ReplicatedSubscriptionsUpdate {
required string subscription_name = 1;
repeated ClusterMessageId clusters = 2;
}
// Represent one cluster and an associated message id.
// The message id is local to that particular cluster
message ClusterMessageId {
    required string cluster = 1;
    required MessageIdData message_id = 2;
}

// A message position expressed as a (ledger id, entry id) pair.
message MessageIdData {
    required uint64 ledger_id = 1;
    required uint64 entry_id = 2;
}

/// --- Transaction marker ---

// Records the message id associated with a transaction commit marker.
message TxnCommitMarker {
    required MessageIdData message_id = 1;
}
| {
"pile_set_name": "Github"
} |
# Build Options
# change to "no" to disable the options, or define them in the Makefile in
# the appropriate keymap folder that will get included automatically
#
CONSOLE_ENABLE = no # Console for debug(+400)
| {
"pile_set_name": "Github"
} |
" Vim indent file
" Language: Django HTML template
" Maintainer: Dave Hodder <[email protected]>
" Last Change: 2007 Jan 25
" Only load this indent file when no other was loaded.
" Guard: bail out if another indent script already configured this buffer.
if exists("b:did_indent")
  finish
endif

" Use HTML formatting rules.
runtime! indent/html.vim
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2010-2019 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.tools.projectWizard.templates
import org.jetbrains.kotlin.tools.projectWizard.ir.buildsystem.ModuleIR
import org.jetbrains.kotlin.tools.projectWizard.ir.buildsystem.gradle.multiplatform.NativeTargetInternalIR
import org.jetbrains.kotlin.tools.projectWizard.ir.buildsystem.gradle.multiplatform.TargetConfigurationIR
import org.jetbrains.kotlin.tools.projectWizard.ir.buildsystem.withIrs
import org.jetbrains.kotlin.tools.projectWizard.plugins.kotlin.ModuleType
import org.jetbrains.kotlin.tools.projectWizard.settings.buildsystem.SourcesetType
import org.jetbrains.kotlin.tools.projectWizard.KotlinNewProjectWizardBundle
import org.jetbrains.kotlin.tools.projectWizard.core.*
import org.jetbrains.kotlin.tools.projectWizard.core.safeAs
import org.jetbrains.kotlin.tools.projectWizard.moduleConfigurators.NativeTargetConfigurator
import org.jetbrains.kotlin.tools.projectWizard.settings.buildsystem.Module
/**
 * New-project-wizard template that sets up a console application for
 * Kotlin/Native modules.  Title and description come from the bundle;
 * the template is offered only for desktop native targets.
 */
class NativeConsoleApplicationTemplate : Template() {
    override val title: String = KotlinNewProjectWizardBundle.message("module.template.native.console.title")
    override val description: String = KotlinNewProjectWizardBundle.message("module.template.native.console.description")

    // Applicable to native modules only.
    override val moduleTypes: Set<ModuleType> = setOf(ModuleType.native)

    override val id: String = "nativeConsoleApp"

    // Restrict to native target configurators that report a desktop target.
    override fun isApplicableTo(
        reader: Reader,
        module: Module
    ): Boolean =
        module.configurator.safeAs<NativeTargetConfigurator>()?.isDesktopTarget == true

    // Adds a NativeTargetInternalIR named "main" to the target configuration.
    override fun updateTargetIr(module: ModuleIR, targetConfigurationIR: TargetConfigurationIR): TargetConfigurationIR =
        targetConfigurationIR.withIrs(NativeTargetInternalIR("main"))

    // Contributes a main.kt generated from the bundled main.kt.vm template.
    override fun Reader.getFileTemplates(module: ModuleIR): List<FileTemplateDescriptorWithPath> = buildList {
        +(FileTemplateDescriptor("$id/main.kt.vm", "main.kt".asPath()) asSrcOf SourcesetType.main)
    }
}
"pile_set_name": "Github"
} |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# module of pack whl installer for Paddle-lite
import shutil
import os
from setuptools import setup, Distribution
class BinaryDistribution(Distribution):
    """Distribution that always reports platform-specific (binary) contents.

    Forcing ``has_ext_modules()`` to ``True`` makes setuptools produce a
    platform-tagged (non-"pure") wheel even though no extension modules are
    declared, because the package ships a prebuilt native library
    (``lite.so`` / ``lite.pyd``).
    """

    def has_ext_modules(self):
        # Always claim extension modules so the wheel is tagged per-platform.
        return True
# NOTE(review): this file appears to be a configure-time template — the
# ${...} and @...@ tokens are substituted before the script runs.

# Get the paddle-lite version; if the build is not based on a release tag,
# fall back to the commit id instead.
PADDLELITE_COMMITE = "@PADDLE_LITE_COMMIT@"
PADDLELITE_TAG = "@PADDLE_LITE_TAG@"
if PADDLELITE_TAG == "":
    PADDLELITE_VERSION = PADDLELITE_COMMITE
else:
    PADDLELITE_VERSION = PADDLELITE_TAG

# The core lib of paddlelite is stored as lite.so under the first
# "inference_lite_lib*" directory in the build tree.
files = os.listdir('${PADDLE_BINARY_DIR}')
INFERENCE_LITE_LIB_PATH = ''
for entry in files:
    # str.startswith is the idiomatic form of find(...) == 0 (and avoids
    # shadowing the well-known name `file`).
    if entry.startswith('inference_lite_lib'):
        INFERENCE_LITE_LIB_PATH = '${PADDLE_BINARY_DIR}/' + entry
        break
LITE_PATH = INFERENCE_LITE_LIB_PATH + '/python/install/lite'
PACKAGE_DATA = {'paddlelite': ['lite.so' if os.name != 'nt' else 'lite.pyd']}

# Copy the opt helper script of paddlelite next to the python package.
shutil.copy('${PADDLE_SOURCE_DIR}/lite/api/python/bin/paddle_lite_opt', LITE_PATH)

# Put all thirdparty libraries in paddlelite.libs.
PACKAGE_DATA['paddlelite.libs'] = []
LIB_PATH = INFERENCE_LITE_LIB_PATH + '/python/install/libs/'
if '${WITH_MKL}' == 'ON':
    shutil.copy('${MKLML_SHARED_IOMP_LIB}', LIB_PATH)
    shutil.copy('${MKLML_SHARED_LIB}', LIB_PATH)
    if os.name != 'nt':
        PACKAGE_DATA['paddlelite.libs'] += ['libmklml_intel.so', 'libiomp5.so']
    else:
        PACKAGE_DATA['paddlelite.libs'] += ['libiomp5md.dll', 'mklml.dll']
        shutil.copy('${MKLML_SHARED_LIB_DEPS}', LIB_PATH)
        PACKAGE_DATA['paddlelite.libs'] += ['msvcr120.dll']

# Link lite.so against paddlelite.libs via an $ORIGIN rpath (ELF only).
# NOTE(review): COMMAND is built from configure-time paths, not user input,
# so os.system is tolerated here; a path containing spaces would still break.
if os.name != 'nt':
    COMMAND = "patchelf --set-rpath '$ORIGIN/libs/' " + LITE_PATH + "/lite.so"
    if os.system(COMMAND) != 0:
        raise Exception("patch third_party libs failed, command: %s" % COMMAND)

# Remove the unused libs/__init__.py so libs is shipped as plain data.
if os.path.isfile(LIB_PATH + '/__init__.py'):
    os.remove(LIB_PATH + '/__init__.py')

# Set the directory path for each package.
PACKAGE_DIR = {
    # The paddle.fluid.proto will be generated while compiling.
    # So that package points to other directory.
    'paddlelite.libs': LIB_PATH,
    'paddlelite': LITE_PATH
}
if os.name == 'nt':
    # Fix the path separator under windows.
    fix_package_dir = {}
    for k, v in PACKAGE_DIR.items():
        fix_package_dir[k] = v.replace('/', '\\')
    PACKAGE_DIR = fix_package_dir

setup(
    name='paddlelite',
    version=PADDLELITE_VERSION,
    description='Paddle-Lite Library',
    scripts=['lite/paddle_lite_opt'],
    packages=['paddlelite', 'paddlelite.libs'],
    package_dir=PACKAGE_DIR,
    package_data=PACKAGE_DATA,
    distclass=BinaryDistribution
)
| {
"pile_set_name": "Github"
} |
<HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=windows-1252">
<title>Crimson Editor Setup</title>
<meta content="text/html; charset=iso-8859-1"
http-equiv="Content-Type">
<link type="text/css" rel="stylesheet" href="../../css/default.css">
</HEAD>
<BODY>
<FONT SIZE=5>
<B><U><h1>Using AutoIt3 and the Crimson Editor</h1>
</U>
</B></FONT><p align="left"><font size="2">Last updated:</font><FONT SIZE=5><FONT SIZE=2>
<!--webbot bot="Timestamp" startspan S-Type="EDITED" S-Format="%m/%d/%Y" -->01/30/2004<!--webbot
bot="Timestamp" I-CheckSum="12510" endspan -->
</FONT></p>
</FONT><FONT SIZE=2>
<P><b>Install AutoIt3 Syntax Highlighting and Context sensitive Help in the Crimson
Editor.<br>
(up to date for AutoIt3 ver: 3.0.91)</b> </P>
<OL>
<LI>First get the Crimson editor installed: <a href="http://www.crimsoneditor.com/">http://www.crimsoneditor.com/</a></LI>
<LI>Copy these 2 files <a href="autoit3.key">autoit3.key</a> & <a href="autoit3.spc">autoit3.spc</a>
to "C:\Program Files\Crimson Editor\spec"</LI>
<LI>Copy this file <a href="extension.au3">extension.au3</a> to "C:\Program
Files\Crimson Editor\link"</LI>
<LI>Start the Crimson editor program.</LI>
<LI>To enable Context sensitive Help do:</LI>
<P>1. Open Preferences dialog box and select User Tools page<BR>
2. Select an empty slot and fill with the following arguments.<BR>
- Menu Text: AutoIT Context Help<BR>
- Command: <i>Your-AutoIt-Dir</i>\AutoIt.chm<BR>
- Argument: $(CurrWord)<BR>
- Initial dir: $(FileDir)<BR>
- Hot key: F1<BR>
- Close on exit: Yes<BR>
- Save before execute: No<BR>
Now click <b><i> F1</i></b> on any keyword and it will jump right to its
description in the helpfile.<BR>
</P>
<LI>To enable running the script your working on from within Crimson:</LI>
<P>1. Open Preferences dialog box and select User Tools page<BR>
2. Select an empty slot and fill with the following arguments.<BR>
- Menu Text: AutoIT Run Script<br>
- Command: <i>Your-AutoIt-Dir</i>\AutoIt3.exe<BR>
- Argument: $(FileTitle).au3<br>
- Initial dir: $(FileDir)<BR>
- Hot key: CTRL + 1<BR>
- Close on exit: Yes<BR>
- Save before execute: Yes<BR>
Now click <b><i> CTRL+1</i></b> Will save and Run your script</P>
<LI>To enable Compiling the script your working on from within Crimson:</LI></OL>
<DIR>
<P>1. Open Preferences dialog box and select User Tools page<BR>
2. Select an empty slot and fill with the following arguments.<BR>
- Menu Text: AutoIT Compile Script<br>
- Command: <i>Your-AutoIt-Dir</i>\ Aut2Exe\Aut2Exe<br>
- Argument: /in $(FileTitle).au3<br>
- Initial dir: $(FileDir)<BR>
- Hot key: CTRL + 2<BR>
- Close on exit: Yes<BR>
- Save before execute: Yes<BR>
Now click <b><i> CTRL+2</i></b> will save and Compile your script.</P>
</DIR>
</FONT></BODY>
</HTML>
| {
"pile_set_name": "Github"
} |
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
const (
	// ImageLayoutFile is the file name of oci image layout file
	ImageLayoutFile = "oci-layout"
	// ImageLayoutVersion is the version of ImageLayout
	ImageLayoutVersion = "1.0.0"
)

// ImageLayout is the structure in the "oci-layout" file, found in the root
// of an OCI Image-layout directory.
type ImageLayout struct {
	// Version is the layout schema version ("imageLayoutVersion" in JSON).
	Version string `json:"imageLayoutVersion"`
}
| {
"pile_set_name": "Github"
} |
The Toronto Stock Exchange's key index turned in another solid performance on Friday, posting a fourth record close for 1997 and hitting a new intra-day high as well.
The TSE 300 Composite Index added 35.14 points to close at 6138.80 in a seven session winning streak. Trading again was brisk: 120.3 million shares moved worth C$1.85 billion ($1.38 billion).
The new lifetime high is now 6144.29 points.
"Stocks are terrific. Another up day, in Toronto, across the board," said Maison Placements Canada president John Ing. "Gold stocks were very strong."
Bullion prices managed to push heavily weighted golds higher. Comex February gold rose $1.40 to finish at $356.40 on Friday.
Toronto's market has also benefitted from a recent influx of cash. Canadians are turning some bank savings into tax sheltered pension funds ahead of an end-February deadline for 1996 retirement plans, analysts said.
Of Toronto's 14 sub-indices, all but three -- real estate, forestry products and utilities -- climbed. Consumer products, golds, conglomerates and media stocks surged the most.
Advancers outpaced decliners 603 to 430 while 263 traded unchanged.
Tiny Mineral Resources Corp. topped Toronto's most actives. Shares rose a cent to seven and a half cents after news it plans a takeover bid for ailing Anvil Range Mining Corp.
Canadian Occidental Petroleum Ltd. jumped 1.25 to 25.30 on nearly 3.5 million shares, helped by firmer energy prices.
Bre-X Minerals Ltd. slipped 0.30 to 24.40. It said it was reviewing Placer Dome Inc.'s C$5 billion ($3.7 billion) offer and comparing it to Barrick Gold Corp.'s unpriced bid.
Despite Bre-X's response, a source in the Indonesian mines ministry said the Indonesian government, which suggested a Bre-X-Barrick partnership, was not likely to approve Placer's plan. The government has the final say over which companies will develop the huge Busang gold deposit in Indonesia.
Placer shares gained 0.20 to 29.15 while Barrick rose 1.05 to 36.85.
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html><html class="reftest-wait"><head><script type="text/javascript">
// Crashtest mutator: edits the text nodes adjacent to the <area> and the
// <div id="main"> with forced layout flushes in between, then removes the
// reftest-wait class so the harness takes its snapshot.
function boom()
{
  var area = document.getElementById("area");
  var main = document.getElementById("main");
  // text node immediately following the <area> inside the <map>
  area.nextSibling.data += " a ";
  // reading offsetHeight forces a synchronous layout flush
  document.documentElement.offsetHeight;
  area.nextSibling.data = " b ";
  // text node immediately preceding the 1px-wide <div id="main">
  main.previousSibling.data += " \u042A ";
  // signal the reftest harness that the test has finished
  document.documentElement.removeAttribute("class");
}
// Deferred entry point: run boom() from a zero-delay timeout so it
// executes after the load event handler returns.
function boom0(ev)
{
  setTimeout(boom, 0);
}
</script></head><body onload="boom0();"> <div id="main" style="width: 1px;"><img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR42mP4z8AAAAMBAQD3A0FDAAAAAElFTkSuQmCC" usemap="#Map"><map name="Map"><area id="area"> </map></div></body></html> | {
"pile_set_name": "Github"
} |
/*
*
*
* Distributed under the OpenDDS License.
* See: http://www.opendds.org/license.html
*/
#ifndef DATAREADER_LISTENER_IMPL_H
#define DATAREADER_LISTENER_IMPL_H
#include <ace/Global_Macros.h>
#include <dds/DdsDcpsSubscriptionC.h>
#include <dds/DCPS/LocalObject.h>
#include <dds/DCPS/Definitions.h>
// Listener receiving DDS DataReader status callbacks.
// Each virtual method below overrides one callback declared on
// DDS::DataReaderListener; this header only declares them — the
// implementations live in the corresponding source file.
class DataReaderListenerImpl
  : public virtual OpenDDS::DCPS::LocalObject<DDS::DataReaderListener> {
public:
  // Deadline QoS contract was missed for data this reader expected.
  virtual void on_requested_deadline_missed(
    DDS::DataReader_ptr reader,
    const DDS::RequestedDeadlineMissedStatus& status);

  // A writer's offered QoS is incompatible with this reader's requested QoS.
  virtual void on_requested_incompatible_qos(
    DDS::DataReader_ptr reader,
    const DDS::RequestedIncompatibleQosStatus& status);

  // A sample was rejected (e.g. resource limits exceeded).
  virtual void on_sample_rejected(
    DDS::DataReader_ptr reader,
    const DDS::SampleRejectedStatus& status);

  // Liveliness of a matched writer changed.
  virtual void on_liveliness_changed(
    DDS::DataReader_ptr reader,
    const DDS::LivelinessChangedStatus& status);

  // New data is available to be taken/read from the reader.
  virtual void on_data_available(
    DDS::DataReader_ptr reader);

  // A writer has been matched with (or unmatched from) this reader.
  virtual void on_subscription_matched(
    DDS::DataReader_ptr reader,
    const DDS::SubscriptionMatchedStatus& status);

  // A sample was lost and will never be received.
  virtual void on_sample_lost(
    DDS::DataReader_ptr reader,
    const DDS::SampleLostStatus& status);
};
#endif /* DATAREADER_LISTENER_IMPL_H */
| {
"pile_set_name": "Github"
} |
/*
Simple DirectMedia Layer
Copyright (C) 1997-2019 Sam Lantinga <[email protected]>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"
#include "SDL_androidvideo.h"
/* Touch-input entry points used by the Android video backend.
 * NOTE(review): declared here without a prototype header include; the
 * definitions are not visible in this translation unit — confirm they
 * live in the Android touch source file. */
extern void Android_InitTouch(void);
extern void Android_QuitTouch(void);
extern void Android_OnTouch(SDL_Window *window, int touch_device_id_in, int pointer_finger_id_in, int action, float x, float y, float p);
/* vi: set ts=4 sw=4 expandtab: */
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2013
* Minchan Kim <[email protected]>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cpumask.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
/*
* This file implements multi-threaded decompression in the
* decompressor framework
*/
/*
* The reason that multiply two is that a CPU can request new I/O
* while it is waiting previous request.
*/
#define MAX_DECOMPRESSOR (num_online_cpus() * 2)
/*
 * Upper bound on concurrently allocated decompressor streams; scales
 * with the number of online CPUs (x2 — see MAX_DECOMPRESSOR above).
 */
int squashfs_max_decompressors(void)
{
	return MAX_DECOMPRESSOR;
}
/* Per-superblock pool of decompressor streams. */
struct squashfs_stream {
	void *comp_opts;		/* decompressor options passed to ->init() */
	struct list_head strm_list;	/* idle decomp_stream objects */
	struct mutex mutex;		/* protects strm_list and avail_decomp */
	int avail_decomp;		/* number of streams currently allocated */
	wait_queue_head_t wait;		/* waiters for a free stream */
};

/* One decompressor instance, linked on squashfs_stream.strm_list when idle. */
struct decomp_stream {
	void *stream;			/* opaque state from decompressor->init() */
	struct list_head list;
};
/*
 * Return a decompressor stream to the pool and wake one waiter that may
 * be sleeping in get_decomp_stream().
 */
static void put_decomp_stream(struct decomp_stream *decomp_strm,
				struct squashfs_stream *stream)
{
	mutex_lock(&stream->mutex);
	list_add(&decomp_strm->list, &stream->strm_list);
	mutex_unlock(&stream->mutex);
	wake_up(&stream->wait);
}
/*
 * Allocate the per-superblock stream pool and pre-create one
 * decompressor, so decompression can always make progress even when
 * later dynamic allocations fail.
 * Takes ownership of comp_opts on success; returns the pool, or an
 * ERR_PTR on failure (comp_opts is then left for the caller to free).
 */
void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
				void *comp_opts)
{
	struct squashfs_stream *stream;
	struct decomp_stream *decomp_strm = NULL;
	int err = -ENOMEM;

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream)
		goto out;
	stream->comp_opts = comp_opts;
	mutex_init(&stream->mutex);
	INIT_LIST_HEAD(&stream->strm_list);
	init_waitqueue_head(&stream->wait);
	/*
	 * We should have a decompressor at least as default
	 * so if we fail to allocate new decompressor dynamically,
	 * we could always fall back to default decompressor and
	 * file system works.
	 */
	decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
	if (!decomp_strm)
		goto out;
	decomp_strm->stream = msblk->decompressor->init(msblk,
		stream->comp_opts);
	if (IS_ERR(decomp_strm->stream)) {
		err = PTR_ERR(decomp_strm->stream);
		goto out;
	}
	list_add(&decomp_strm->list, &stream->strm_list);
	stream->avail_decomp = 1;
	return stream;
out:
	/* kfree(NULL) is a no-op, so both partial-failure paths are safe */
	kfree(decomp_strm);
	kfree(stream);
	return ERR_PTR(err);
}
/*
 * Tear down the stream pool: free every idle decompressor, the stored
 * compressor options and the pool itself.  All streams must be idle
 * (back on strm_list) by now — WARN if any are still outstanding.
 */
void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
{
	struct squashfs_stream *stream = msblk->stream;

	if (stream) {
		struct decomp_stream *decomp_strm;

		while (!list_empty(&stream->strm_list)) {
			decomp_strm = list_entry(stream->strm_list.prev,
				struct decomp_stream, list);
			list_del(&decomp_strm->list);
			msblk->decompressor->free(decomp_strm->stream);
			kfree(decomp_strm);
			stream->avail_decomp--;
		}
		/* nonzero here means a stream was never returned to the pool */
		WARN_ON(stream->avail_decomp);
		kfree(stream->comp_opts);
		kfree(stream);
	}
}
/*
 * Take a decompressor stream from the pool, growing the pool on demand
 * (up to MAX_DECOMPRESSOR).  If the pool is at capacity, or growing it
 * fails, sleep until another user returns a stream.
 * Always succeeds eventually; never returns NULL.
 */
static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
				struct squashfs_stream *stream)
{
	struct decomp_stream *decomp_strm;

	while (1) {
		mutex_lock(&stream->mutex);
		/* There is available decomp_stream */
		if (!list_empty(&stream->strm_list)) {
			decomp_strm = list_entry(stream->strm_list.prev,
				struct decomp_stream, list);
			list_del(&decomp_strm->list);
			mutex_unlock(&stream->mutex);
			break;
		}
		/*
		 * If there is no available decomp and already full,
		 * let's wait for releasing decomp from other users.
		 */
		if (stream->avail_decomp >= MAX_DECOMPRESSOR)
			goto wait;
		/* Let's allocate new decomp */
		decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
		if (!decomp_strm)
			goto wait;
		decomp_strm->stream = msblk->decompressor->init(msblk,
			stream->comp_opts);
		if (IS_ERR(decomp_strm->stream)) {
			kfree(decomp_strm);
			goto wait;
		}
		stream->avail_decomp++;
		WARN_ON(stream->avail_decomp > MAX_DECOMPRESSOR);
		mutex_unlock(&stream->mutex);
		break;
wait:
		/*
		 * If system memory is tough, let's wait for others'
		 * releasing instead of hurting VM because it could
		 * make page cache thrashing.
		 */
		mutex_unlock(&stream->mutex);
		wait_event(stream->wait,
			!list_empty(&stream->strm_list));
	}
	return decomp_strm;
}
/*
 * Decompress a block: borrow a stream from the pool, run the
 * superblock's decompressor over the buffer_heads, then return the
 * stream (waking any waiter).  Returns the decompressor's result;
 * negative means corrupt/failed data.
 */
int squashfs_decompress(struct squashfs_sb_info *msblk, struct buffer_head **bh,
	int b, int offset, int length, struct squashfs_page_actor *output)
{
	int res;
	struct squashfs_stream *stream = msblk->stream;
	struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
	res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
		bh, b, offset, length, output);
	put_decomp_stream(decomp_stream, stream);
	if (res < 0)
		ERROR("%s decompression failed, data probably corrupt\n",
			msblk->decompressor->name);
	return res;
}
| {
"pile_set_name": "Github"
} |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
//go:generate go run gen.go
// This program generates system adaptation constants and types,
// internet protocol constants and tables by reading template files
// and IANA protocol registries.
package main
import (
"bytes"
"encoding/xml"
"fmt"
"go/format"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
)
// main runs the system-adaptation generator and the IANA-registry
// generator in order, exiting nonzero on the first failure.
func main() {
	for _, gen := range []func() error{genzsys, geniana} {
		if err := gen(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}
// genzsys regenerates zsys_<GOOS>[_<GOARCH>].go by running
// "go tool cgo -godefs" over the platform's defs_<GOOS>.go file.
// A missing defs file is not an error: the platform simply has
// nothing to generate.
func genzsys() error {
	defs := "defs_" + runtime.GOOS + ".go"
	f, err := os.Open(defs)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	f.Close()
	cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
	b, err := cmd.Output()
	if err != nil {
		return err
	}
	// The ipv6 package still supports go1.2, and so we need to
	// take care of additional platforms in go1.3 and above for
	// working with go1.2.
	switch {
	case runtime.GOOS == "dragonfly" || runtime.GOOS == "solaris":
		b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+"\n\npackage ipv6\n"), 1)
	case runtime.GOOS == "linux" && (runtime.GOARCH == "arm64" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le"):
		b = bytes.Replace(b, []byte("package ipv6\n"), []byte("// +build "+runtime.GOOS+","+runtime.GOARCH+"\n\npackage ipv6\n"), 1)
	}
	// gofmt the generated source before writing it out.
	b, err = format.Source(b)
	if err != nil {
		return err
	}
	zsys := "zsys_" + runtime.GOOS + ".go"
	switch runtime.GOOS {
	case "freebsd", "linux":
		zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
	}
	if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
		return err
	}
	return nil
}
// registries lists the IANA registry documents to download, each paired
// with the parser that renders it into generated Go source.
var registries = []struct {
	url   string
	parse func(io.Writer, io.Reader) error
}{
	{
		"http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
		parseICMPv6Parameters,
	},
}
// geniana downloads each IANA registry listed in registries, renders the
// generated Go declarations into a buffer, gofmt's the result and writes
// it to iana.go.
//
// Fix over the original: resp.Body was closed via defer inside the loop,
// which postponed every Close until the function returned, holding all
// connections open while iterating.  Each request is now scoped in a
// closure so its body is closed at the end of the iteration.
func geniana() error {
	var bb bytes.Buffer
	fmt.Fprintf(&bb, "// go generate gen.go\n")
	fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
	fmt.Fprintf(&bb, "package ipv6\n\n")
	for _, r := range registries {
		err := func() error {
			resp, err := http.Get(r.url)
			if err != nil {
				return err
			}
			// defer now fires per-iteration, not at function exit
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url)
			}
			return r.parse(&bb, resp.Body)
		}()
		if err != nil {
			return err
		}
		fmt.Fprintf(&bb, "\n")
	}
	// gofmt the accumulated source before writing it out.
	b, err := format.Source(bb.Bytes())
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
		return err
	}
	return nil
}
// parseICMPv6Parameters decodes the IANA ICMPv6 parameters XML registry
// from r and writes two generated declarations to w: an ICMPType
// constant block and an icmpTypes name map.  Records whose cleaned-up
// Name is empty (reserved/unassigned entries skipped by escape) are
// omitted from both.
func parseICMPv6Parameters(w io.Writer, r io.Reader) error {
	dec := xml.NewDecoder(r)
	var icp icmpv6Parameters
	if err := dec.Decode(&icp); err != nil {
		return err
	}
	prs := icp.escape()
	fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
	fmt.Fprintf(w, "const (\n")
	for _, pr := range prs {
		if pr.Name == "" {
			continue
		}
		fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value)
		fmt.Fprintf(w, "// %s\n", pr.OrigName)
	}
	fmt.Fprintf(w, ")\n\n")
	fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
	fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
	for _, pr := range prs {
		if pr.Name == "" {
			continue
		}
		fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName))
	}
	fmt.Fprintf(w, "}\n")
	return nil
}
// icmpv6Parameters mirrors the XML schema of the IANA ICMPv6 parameters
// document: a top-level registry containing sub-registries of
// value/name records.
type icmpv6Parameters struct {
	XMLName    xml.Name `xml:"registry"`
	Title      string   `xml:"title"`
	Updated    string   `xml:"updated"`
	Registries []struct {
		Title   string `xml:"title"`
		Records []struct {
			Value string `xml:"value"`
			Name  string `xml:"name"`
		} `xml:"record"`
	} `xml:"registry"`
}

// canonICMPv6ParamRecord is one registry record with its name cleaned up
// into a Go identifier (Name) alongside the original text (OrigName).
type canonICMPv6ParamRecord struct {
	OrigName string
	Name     string
	Value    int
}
// escape finds the "Type"/"type" sub-registry and canonicalizes its
// records: reserved/unassigned/experimental entries are skipped, and the
// remaining names are stripped of punctuation and filler words so they
// form Go identifiers.
//
// Skipped records leave zero-valued entries in the returned slice (it is
// sized to the full record count and indexed by i); callers filter these
// out by checking Name == "".
func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord {
	id := -1
	for i, r := range icp.Registries {
		if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
			id = i
			break
		}
	}
	if id < 0 {
		return nil
	}
	prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records))
	// Replacer that turns registry names into identifier-safe strings.
	sr := strings.NewReplacer(
		"Messages", "",
		"Message", "",
		"ICMP", "",
		"+", "P",
		"-", "",
		"/", "",
		".", "",
		" ", "",
	)
	for i, pr := range icp.Registries[id].Records {
		if strings.Contains(pr.Name, "Reserved") ||
			strings.Contains(pr.Name, "Unassigned") ||
			strings.Contains(pr.Name, "Deprecated") ||
			strings.Contains(pr.Name, "Experiment") ||
			strings.Contains(pr.Name, "experiment") {
			continue
		}
		// Multi-line names in the registry are joined with spaces.
		ss := strings.Split(pr.Name, "\n")
		if len(ss) > 1 {
			prs[i].Name = strings.Join(ss, " ")
		} else {
			prs[i].Name = ss[0]
		}
		s := strings.TrimSpace(prs[i].Name)
		prs[i].OrigName = s
		prs[i].Name = sr.Replace(s)
		prs[i].Value, _ = strconv.Atoi(pr.Value)
	}
	return prs
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env perl
use Expect;
use strict;
use IO::Pty;
# Spawn an ssh session that opens a SOCKS proxy on local port 7070
# (-D 7070), drive the login dialogue with Expect, then hand the
# terminal over to the user interactively.
#
# Fixes over the original:
#   * raw_pty(1) was configured on a throwaway Expect object that was
#     then replaced wholesale by Expect->spawn(); spawn on the already
#     configured object instead so the raw-pty setting takes effect.
#   * "my $PROMPT" was declared twice in the same scope (triggering a
#     "masks earlier declaration" warning); keep a single declaration.
sub do_ssh($$$) {
    (my $username, my $password, my $host) = @_;
    my $spawn = new Expect;
    $spawn->raw_pty(1);    # must be set before spawning
    # This function traps WINCH signals and passes them on
    sub winch {
        my $signame = shift;
        my $pid = $spawn->pid;
        print "pid $pid, SIG$signame\n";
        $spawn->slave->clone_winsize_from(\*STDIN);
        kill WINCH => $spawn->pid if $spawn->pid;
    }
    $SIG{WINCH} = \&winch; # best strategy
    # Spawn on the configured object so raw_pty(1) is honoured.
    $spawn->spawn("ssh -C2qN -D 7070 $username\@$host");
    # log everything if you want
    # $spawn->log_file("/tmp/autossh.log.$$");
    my $PROMPT = '[\]\$\>\#]$';
    my $ret = $spawn->expect(30,
        [ qr/\(yes\/no\)\?\s*$/ => sub { $spawn->send("yes\n"); exp_continue; } ],
        [ qr/assword:\s*$/ => sub { $spawn->send("$password\n"); exp_continue; } ],
        [ qr/ogin:\s*$/ => sub { $spawn->send("$username\n"); exp_continue; } ],
        [ qr/REMOTE HOST IDEN/ => sub { print "FIX: .ssh/known_hosts\n"; exp_continue; } ],
        [ qr/$PROMPT/ => sub { $spawn->send("echo Now try window resizing\n"); } ],
    );
    # Hand over control
    $spawn->interact();
}
if ($ENV{USER} eq "bhj") {
chomp(my $password = qx(get-authinfo fixnet bhj));
do_ssh("bhj", $password, "fixnet");
}
| {
"pile_set_name": "Github"
} |
# Maximum-likelihood reconstruction of a monotone input signal passed
# through a known FIR filter with additive Gaussian noise, solved as a
# convex program with cvxpy.
import numpy as np
from cvxpy import *
import matplotlib.pyplot as plt
import time
np.random.seed(1)
N = 100
ANSWERS = []   # optimal objective values of each solve
TIME = 0       # cumulative solver wall time (seconds)
# create an increasing input signal
xtrue = np.zeros(N)
xtrue[0:40] = 0.1
xtrue[49] = 2
xtrue[69:80] = 0.15
xtrue[79] = 1
xtrue = np.cumsum(xtrue)
# pass the increasing input through a moving-average filter
# and add Gaussian noise
h = np.array([1, -0.85, 0.7, -0.3])
k = h.size
yhat = np.convolve(h,xtrue)
# drop the convolution tail (k-1 = 3 samples) to keep N observations
y = yhat[0:-3].reshape(N,1) + np.random.randn(N,1)
xtrue = np.asmatrix(xtrue.reshape(N,1))
y = np.asmatrix(y)
v = Variable(N)   # noise estimate
x = Variable(N)   # signal estimate
# nonnegative, nondecreasing signal plus the measurement model
constraints = [x >= 0]
for i in range(N - 1):
    constraints.append( x[i+1] >= x[i] )
constraints.append( y == ( conv(h,x)[0:-3] + v ) )
obj = Minimize( norm(v) )
prob = Problem(obj, constraints)
tic = time.time()
ANSWERS.append(prob.solve())
toc = time.time()
TIME += toc - tic
pass #print x.value
pass #print v.value
pass #plt.figure(1)
pass #plt.plot(xtrue)
pass #plt.plot(x.value)
pass #plt.legend(["True signal", "MLE signal"])
pass #plt.title("Maximum likelihood reconstruction of FIR filter with constraints")
pass #plt.show()
# NOTE(review): this second solve is titled "no constraints" below, yet it
# appends a duplicate measurement constraint to the SAME list, which still
# contains the monotonicity constraints from above — so it solves the same
# problem again.  Presumably a fresh constraints list was intended; confirm
# against the exercise statement.
constraints.append( y == ( conv(h,x)[0:-3] + v ) )
obj = Minimize( norm(v) )
prob = Problem(obj, constraints)
ANSWERS.append(prob.solve())
pass #plt.figure(1)
pass #plt.plot(xtrue)
pass #plt.plot(x.value)
pass #plt.legend(["True signal", "MLE signal"])
pass #plt.title("Maximum likelihood reconstruction of FIR filter no constraints")
pass #plt.show()
"pile_set_name": "Github"
} |
// Demonstrates that eval("this") inside an IIFE reflects whatever
// receiver was supplied via Function.prototype.call.
// Here: the enclosing function's own `this`.
var evaluate1 = function () {
(function () {
print(eval("this"));
}).call(this);
};
// Here: a fresh empty object passed explicitly as the receiver.
var evaluate2 = function () {
var context = {};
(function () {
print(eval("this"));
}).call(context);
};
// Here: a caller-supplied receiver.
var evaluate3 = function (context) {
(function () {
print(eval("this"));
}).call(context);
};
"pile_set_name": "Github"
} |
// Package jsoniter implements encoding and decoding of JSON as defined in
// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json.
// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter
// and variable type declarations (if any).
// jsoniter interfaces gives 100% compatibility with code using standard lib.
//
// "JSON and Go"
// (https://golang.org/doc/articles/json_and_go.html)
// gives a description of how Marshal/Unmarshal operate
// between arbitrary or predefined json objects and bytes,
// and it applies to jsoniter.Marshal/Unmarshal as well.
//
// Besides, jsoniter.Iterator provides a different set of interfaces
// iterating given bytes/string/reader
// and yielding parsed elements one by one.
// This set of interfaces reads input as required and gives
// better performance.
package jsoniter
| {
"pile_set_name": "Github"
} |
<?php
/**
* Copyright © Magento, Inc. All rights reserved.
* See COPYING.txt for license details.
*/
namespace Magento\Framework\Api\ExtensionAttribute\Config;
/**
 * Filesystem-based reader for extension_attributes.xml configuration files.
 *
 * Specializes the generic filesystem config reader with the converter,
 * schema locator, default file name and the merge id-attributes specific
 * to extension attribute declarations.
 */
class Reader extends \Magento\Framework\Config\Reader\Filesystem
{
    /**
     * List of id attributes for merge
     *
     * @var array
     */
    protected $_idAttributes = [
        '/config/extension_attributes' => 'for',
        '/config/extension_attributes/attribute' => 'code',
    ];

    /**
     * All collaborators are forwarded unchanged to the parent reader.
     *
     * @param \Magento\Framework\Config\FileResolverInterface $fileResolver
     * @param \Magento\Framework\Api\ExtensionAttribute\Config\Converter $converter
     * @param \Magento\Framework\Api\ExtensionAttribute\Config\SchemaLocator $schemaLocator
     * @param \Magento\Framework\Config\ValidationStateInterface $validationState
     * @param string $fileName
     * @param array $idAttributes
     * @param string $domDocumentClass
     * @param string $defaultScope
     */
    public function __construct(
        \Magento\Framework\Config\FileResolverInterface $fileResolver,
        \Magento\Framework\Api\ExtensionAttribute\Config\Converter $converter,
        \Magento\Framework\Api\ExtensionAttribute\Config\SchemaLocator $schemaLocator,
        \Magento\Framework\Config\ValidationStateInterface $validationState,
        $fileName = 'extension_attributes.xml',
        $idAttributes = [],
        $domDocumentClass = \Magento\Framework\Config\Dom::class,
        $defaultScope = 'global'
    ) {
        parent::__construct(
            $fileResolver,
            $converter,
            $schemaLocator,
            $validationState,
            $fileName,
            $idAttributes,
            $domDocumentClass,
            $defaultScope
        );
    }
}
| {
"pile_set_name": "Github"
} |
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */
#ifndef MY_THREAD_LOCAL_INCLUDED
#define MY_THREAD_LOCAL_INCLUDED
#ifndef _WIN32
#include <pthread.h>
#endif
struct _db_code_state_;
typedef uint32 my_thread_id;
C_MODE_START
#ifdef _WIN32
typedef DWORD thread_local_key_t;
#else
typedef pthread_key_t thread_local_key_t;
#endif
/*
 * Create a thread-local storage key.  Returns nonzero on failure.
 * NOTE: the destructor argument is ignored on Windows (TlsAlloc has no
 * destructor support), so per-thread cleanup behavior differs between
 * platforms.
 */
static inline int my_create_thread_local_key(thread_local_key_t *key,
                                             void (*destructor)(void *))
{
#ifdef _WIN32
  *key= TlsAlloc();
  return (*key == TLS_OUT_OF_INDEXES);
#else
  return pthread_key_create(key, destructor);
#endif
}

/* Destroy a TLS key.  Returns nonzero on failure. */
static inline int my_delete_thread_local_key(thread_local_key_t key)
{
#ifdef _WIN32
  return !TlsFree(key);
#else
  return pthread_key_delete(key);
#endif
}

/* Read the calling thread's value for the key (NULL if never set). */
static inline void* my_get_thread_local(thread_local_key_t key)
{
#ifdef _WIN32
  return TlsGetValue(key);
#else
  return pthread_getspecific(key);
#endif
}

/*
 * Set the calling thread's value for the key.  Returns nonzero on
 * failure (note the negation on Windows: TlsSetValue returns nonzero
 * on success).
 */
static inline int my_set_thread_local(thread_local_key_t key,
                                      void *value)
{
#ifdef _WIN32
  return !TlsSetValue(key, value);
#else
  return pthread_setspecific(key, value);
#endif
}
/**
Retrieve the MySQL thread-local storage variant of errno.
*/
int my_errno();
/**
Set the MySQL thread-local storage variant of errno.
*/
void set_my_errno(int my_errno);
#ifdef _WIN32
/*
thr_winerr is used for returning the original OS error-code in Windows,
my_osmaperr() returns EINVAL for all unknown Windows errors, hence we
preserve the original Windows Error code in thr_winerr.
*/
int thr_winerr();
void set_thr_winerr(int winerr);
#endif
#ifndef DBUG_OFF
/* Return pointer to DBUG for holding current state */
struct _db_code_state_ **my_thread_var_dbug();
my_thread_id my_thread_var_id();
void set_my_thread_var_id(my_thread_id id);
#endif
C_MODE_END
#endif // MY_THREAD_LOCAL_INCLUDED
| {
"pile_set_name": "Github"
} |
//===-- ARMScheduleV6.td - ARM v6 Scheduling Definitions ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the ARM v6 processors.
//
//===----------------------------------------------------------------------===//
// Model based on ARM1176
//
// Functional Units
def V6_Pipe : FuncUnit; // pipeline
// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual"
//
def ARMV6Itineraries : ProcessorItineraries<
[V6_Pipe], [], [
//
// No operand cycles
InstrItinData<IIC_iALUx , [InstrStage<1, [V6_Pipe]>]>,
//
// Binary Instructions that produce a result
InstrItinData<IIC_iALUi , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iALUr , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
InstrItinData<IIC_iALUsi , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
InstrItinData<IIC_iALUsr , [InstrStage<2, [V6_Pipe]>], [3, 3, 2, 1]>,
//
// Bitwise Instructions that produce a result
InstrItinData<IIC_iBITi , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iBITr , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
InstrItinData<IIC_iBITsi , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
InstrItinData<IIC_iBITsr , [InstrStage<2, [V6_Pipe]>], [3, 3, 2, 1]>,
//
// Unary Instructions that produce a result
InstrItinData<IIC_iUNAr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iUNAsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
//
// Zero and sign extension instructions
InstrItinData<IIC_iEXTr , [InstrStage<1, [V6_Pipe]>], [1, 1]>,
InstrItinData<IIC_iEXTAr , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
InstrItinData<IIC_iEXTAsr , [InstrStage<2, [V6_Pipe]>], [3, 3, 2, 1]>,
//
// Compare instructions
InstrItinData<IIC_iCMPi , [InstrStage<1, [V6_Pipe]>], [2]>,
InstrItinData<IIC_iCMPr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iCMPsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iCMPsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
//
// Test instructions
InstrItinData<IIC_iTSTi , [InstrStage<1, [V6_Pipe]>], [2]>,
InstrItinData<IIC_iTSTr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iTSTsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iTSTsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
//
// Move instructions, unconditional
InstrItinData<IIC_iMOVi , [InstrStage<1, [V6_Pipe]>], [2]>,
InstrItinData<IIC_iMOVr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iMOVsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iMOVsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
InstrItinData<IIC_iMOVix2 , [InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [2]>,
InstrItinData<IIC_iMOVix2addpc,[InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [3]>,
InstrItinData<IIC_iMOVix2ld , [InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [5]>,
//
// Move instructions, conditional
InstrItinData<IIC_iCMOVi , [InstrStage<1, [V6_Pipe]>], [3]>,
InstrItinData<IIC_iCMOVr , [InstrStage<1, [V6_Pipe]>], [3, 2]>,
InstrItinData<IIC_iCMOVsi , [InstrStage<1, [V6_Pipe]>], [3, 1]>,
InstrItinData<IIC_iCMOVsr , [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
InstrItinData<IIC_iCMOVix2 , [InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [4]>,
//
// MVN instructions
InstrItinData<IIC_iMVNi , [InstrStage<1, [V6_Pipe]>], [2]>,
InstrItinData<IIC_iMVNr , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
InstrItinData<IIC_iMVNsi , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iMVNsr , [InstrStage<2, [V6_Pipe]>], [3, 2, 1]>,
// Integer multiply pipeline
//
InstrItinData<IIC_iMUL16 , [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
InstrItinData<IIC_iMAC16 , [InstrStage<1, [V6_Pipe]>], [4, 1, 1, 2]>,
InstrItinData<IIC_iMUL32 , [InstrStage<2, [V6_Pipe]>], [5, 1, 1]>,
InstrItinData<IIC_iMAC32 , [InstrStage<2, [V6_Pipe]>], [5, 1, 1, 2]>,
InstrItinData<IIC_iMUL64 , [InstrStage<3, [V6_Pipe]>], [6, 1, 1]>,
InstrItinData<IIC_iMAC64 , [InstrStage<3, [V6_Pipe]>], [6, 1, 1, 2]>,
// Integer load pipeline
//
// Immediate offset
InstrItinData<IIC_iLoad_i , [InstrStage<1, [V6_Pipe]>], [4, 1]>,
InstrItinData<IIC_iLoad_bh_i, [InstrStage<1, [V6_Pipe]>], [4, 1]>,
InstrItinData<IIC_iLoad_d_i , [InstrStage<1, [V6_Pipe]>], [4, 1]>,
//
// Register offset
InstrItinData<IIC_iLoad_r , [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
InstrItinData<IIC_iLoad_bh_r, [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
InstrItinData<IIC_iLoad_d_r , [InstrStage<1, [V6_Pipe]>], [4, 1, 1]>,
//
// Scaled register offset, issues over 2 cycles
InstrItinData<IIC_iLoad_si , [InstrStage<2, [V6_Pipe]>], [5, 2, 1]>,
InstrItinData<IIC_iLoad_bh_si, [InstrStage<2, [V6_Pipe]>], [5, 2, 1]>,
//
// Immediate offset with update
InstrItinData<IIC_iLoad_iu , [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
InstrItinData<IIC_iLoad_bh_iu, [InstrStage<1, [V6_Pipe]>], [4, 2, 1]>,
//
// Register offset with update
InstrItinData<IIC_iLoad_ru , [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
InstrItinData<IIC_iLoad_bh_ru, [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
InstrItinData<IIC_iLoad_d_ru , [InstrStage<1, [V6_Pipe]>], [4, 2, 1, 1]>,
//
// Scaled register offset with update, issues over 2 cycles
InstrItinData<IIC_iLoad_siu, [InstrStage<2, [V6_Pipe]>], [5, 2, 2, 1]>,
InstrItinData<IIC_iLoad_bh_siu,[InstrStage<2, [V6_Pipe]>], [5, 2, 2, 1]>,
//
// Load multiple, def is the 5th operand.
InstrItinData<IIC_iLoad_m , [InstrStage<3, [V6_Pipe]>], [1, 1, 1, 1, 4]>,
//
// Load multiple + update, defs are the 1st and 5th operands.
InstrItinData<IIC_iLoad_mu , [InstrStage<3, [V6_Pipe]>], [2, 1, 1, 1, 4]>,
//
// Load multiple plus branch
InstrItinData<IIC_iLoad_mBr, [InstrStage<3, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [1, 2, 1, 1, 4]>,
//
// iLoadi + iALUr for t2LDRpci_pic.
InstrItinData<IIC_iLoadiALU, [InstrStage<1, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [3, 1]>,
//
// Pop, def is the 3rd operand.
InstrItinData<IIC_iPop , [InstrStage<3, [V6_Pipe]>], [1, 1, 4]>,
//
// Pop + branch, def is the 3rd operand.
InstrItinData<IIC_iPop_Br, [InstrStage<3, [V6_Pipe]>,
InstrStage<1, [V6_Pipe]>], [1, 2, 4]>,
// Integer store pipeline
//
// Immediate offset
InstrItinData<IIC_iStore_i , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iStore_bh_i, [InstrStage<1, [V6_Pipe]>], [2, 1]>,
InstrItinData<IIC_iStore_d_i , [InstrStage<1, [V6_Pipe]>], [2, 1]>,
//
// Register offset
InstrItinData<IIC_iStore_r , [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
InstrItinData<IIC_iStore_bh_r, [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
InstrItinData<IIC_iStore_d_r , [InstrStage<1, [V6_Pipe]>], [2, 1, 1]>,
//
// Scaled register offset, issues over 2 cycles
InstrItinData<IIC_iStore_si , [InstrStage<2, [V6_Pipe]>], [2, 2, 1]>,
InstrItinData<IIC_iStore_bh_si, [InstrStage<2, [V6_Pipe]>], [2, 2, 1]>,
//
// Immediate offset with update
InstrItinData<IIC_iStore_iu , [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
InstrItinData<IIC_iStore_bh_iu, [InstrStage<1, [V6_Pipe]>], [2, 2, 1]>,
//
// Register offset with update
InstrItinData<IIC_iStore_ru, [InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
InstrItinData<IIC_iStore_bh_ru,[InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
InstrItinData<IIC_iStore_d_ru, [InstrStage<1, [V6_Pipe]>], [2, 2, 1, 1]>,
//
// Scaled register offset with update, issues over 2 cycles
InstrItinData<IIC_iStore_siu, [InstrStage<2, [V6_Pipe]>], [2, 2, 2, 1]>,
InstrItinData<IIC_iStore_bh_siu,[InstrStage<2, [V6_Pipe]>], [2, 2, 2, 1]>,
//
// Store multiple
InstrItinData<IIC_iStore_m , [InstrStage<3, [V6_Pipe]>]>,
//
// Store multiple + update
InstrItinData<IIC_iStore_mu , [InstrStage<3, [V6_Pipe]>], [2]>,
// Branch
//
// no delay slots, so the latency of a branch is unimportant
InstrItinData<IIC_Br , [InstrStage<1, [V6_Pipe]>]>,
// VFP
// Issue through integer pipeline, and execute in NEON unit. We assume
// RunFast mode so that NFP pipeline is used for single-precision when
// possible.
//
// FP Special Register to Integer Register File Move
InstrItinData<IIC_fpSTAT , [InstrStage<1, [V6_Pipe]>], [3]>,
//
// Single-precision FP Unary
InstrItinData<IIC_fpUNA32 , [InstrStage<1, [V6_Pipe]>], [5, 2]>,
//
// Double-precision FP Unary
InstrItinData<IIC_fpUNA64 , [InstrStage<1, [V6_Pipe]>], [5, 2]>,
//
// Single-precision FP Compare
InstrItinData<IIC_fpCMP32 , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
//
// Double-precision FP Compare
InstrItinData<IIC_fpCMP64 , [InstrStage<1, [V6_Pipe]>], [2, 2]>,
//
// Single to Double FP Convert
InstrItinData<IIC_fpCVTSD , [InstrStage<1, [V6_Pipe]>], [5, 2]>,
//
// Double to Single FP Convert
InstrItinData<IIC_fpCVTDS , [InstrStage<1, [V6_Pipe]>], [5, 2]>,
//
// Single-Precision FP to Integer Convert
InstrItinData<IIC_fpCVTSI , [InstrStage<1, [V6_Pipe]>], [9, 2]>,
//
// Double-Precision FP to Integer Convert
InstrItinData<IIC_fpCVTDI , [InstrStage<1, [V6_Pipe]>], [9, 2]>,
//
// Integer to Single-Precision FP Convert
InstrItinData<IIC_fpCVTIS , [InstrStage<1, [V6_Pipe]>], [9, 2]>,
//
// Integer to Double-Precision FP Convert
InstrItinData<IIC_fpCVTID , [InstrStage<1, [V6_Pipe]>], [9, 2]>,
//
// Single-precision FP ALU
InstrItinData<IIC_fpALU32 , [InstrStage<1, [V6_Pipe]>], [9, 2, 2]>,
//
// Double-precision FP ALU
InstrItinData<IIC_fpALU64 , [InstrStage<1, [V6_Pipe]>], [9, 2, 2]>,
//
// Single-precision FP Multiply
InstrItinData<IIC_fpMUL32 , [InstrStage<1, [V6_Pipe]>], [9, 2, 2]>,
//
// Double-precision FP Multiply
InstrItinData<IIC_fpMUL64 , [InstrStage<2, [V6_Pipe]>], [9, 2, 2]>,
//
// Single-precision FP MAC
InstrItinData<IIC_fpMAC32 , [InstrStage<1, [V6_Pipe]>], [9, 2, 2, 2]>,
//
// Double-precision FP MAC
InstrItinData<IIC_fpMAC64 , [InstrStage<2, [V6_Pipe]>], [9, 2, 2, 2]>,
//
// Single-precision Fused FP MAC
InstrItinData<IIC_fpFMAC32, [InstrStage<1, [V6_Pipe]>], [9, 2, 2, 2]>,
//
// Double-precision Fused FP MAC
InstrItinData<IIC_fpFMAC64, [InstrStage<2, [V6_Pipe]>], [9, 2, 2, 2]>,
//
// Single-precision FP DIV
InstrItinData<IIC_fpDIV32 , [InstrStage<15, [V6_Pipe]>], [20, 2, 2]>,
//
// Double-precision FP DIV
InstrItinData<IIC_fpDIV64 , [InstrStage<29, [V6_Pipe]>], [34, 2, 2]>,
//
// Single-precision FP SQRT
InstrItinData<IIC_fpSQRT32 , [InstrStage<15, [V6_Pipe]>], [20, 2, 2]>,
//
// Double-precision FP SQRT
InstrItinData<IIC_fpSQRT64 , [InstrStage<29, [V6_Pipe]>], [34, 2, 2]>,
//
// Integer to Single-precision Move
InstrItinData<IIC_fpMOVIS, [InstrStage<1, [V6_Pipe]>], [10, 1]>,
//
// Integer to Double-precision Move
InstrItinData<IIC_fpMOVID, [InstrStage<1, [V6_Pipe]>], [10, 1, 1]>,
//
// Single-precision to Integer Move
InstrItinData<IIC_fpMOVSI, [InstrStage<1, [V6_Pipe]>], [10, 1]>,
//
// Double-precision to Integer Move
InstrItinData<IIC_fpMOVDI, [InstrStage<1, [V6_Pipe]>], [10, 10, 1]>,
//
// Single-precision FP Load
InstrItinData<IIC_fpLoad32 , [InstrStage<1, [V6_Pipe]>], [5, 2, 2]>,
//
// Double-precision FP Load
InstrItinData<IIC_fpLoad64 , [InstrStage<1, [V6_Pipe]>], [5, 2, 2]>,
//
// FP Load Multiple
InstrItinData<IIC_fpLoad_m , [InstrStage<3, [V6_Pipe]>], [2, 1, 1, 5]>,
//
// FP Load Multiple + update
InstrItinData<IIC_fpLoad_mu, [InstrStage<3, [V6_Pipe]>], [3, 2, 1, 1, 5]>,
//
// Single-precision FP Store
InstrItinData<IIC_fpStore32 , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
//
// Double-precision FP Store
// use FU_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpStore64 , [InstrStage<1, [V6_Pipe]>], [2, 2, 2]>,
//
// FP Store Multiple
InstrItinData<IIC_fpStore_m, [InstrStage<3, [V6_Pipe]>], [2, 2, 2, 2]>,
//
// FP Store Multiple + update
InstrItinData<IIC_fpStore_mu,[InstrStage<3, [V6_Pipe]>], [3, 2, 2, 2, 2]>
]>;
| {
"pile_set_name": "Github"
} |
/* Angular form-state styling: controls marked required get a colored
   left edge reflecting their validation state. */
.ng-valid[required],
.ng-valid.required {
  border-left: 5px solid #42A948;
  /* green */
}

/* Any invalid control (but not the form element itself). */
.ng-invalid:not(form) {
  border-left: 5px solid #a94442;
  /* red */
}
"pile_set_name": "Github"
} |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Privacy Subsystem implementation for enrol_guest.
*
* @package enrol_guest
* @copyright 2018 Carlos Escobedo <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
namespace enrol_guest\privacy;
defined('MOODLE_INTERNAL') || die();
/**
* Privacy Subsystem for enrol_guest implementing null_provider.
*
* @copyright 2018 Carlos Escobedo <[email protected]>
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
class provider implements \core_privacy\local\metadata\null_provider {
    /**
     * Get the language string identifier with the component's language
     * file to explain why this plugin stores no data.
     *
     * Implementing null_provider declares that enrol_guest stores no
     * personal data; this identifier points at the explanation string.
     *
     * @return string
     */
    public static function get_reason() : string {
        // Resolved against the enrol_guest language pack.
        return 'privacy:metadata';
    }
} | {
"pile_set_name": "Github"
} |
# 从零打造Echarts —— v1 ZRender和MVC
本篇开始进入正文。
## 写在前面
- 图形、元素、图形元素,都指的是`XElement`,看情况哪个顺口用哪个。
- `ts`可能会报警告,我只是想用代码提示功能而已,就不管辣么多了。
- 文内并没有贴出所有代码,且随着版本更迭,可能有修改不及时导致文内代码和源码不一致的情况,可以参考源码进行查看。
- 源码查看的方式,源码放在[这里](https://github.com/webbillion/xrender-src),每一个版本都有对应的分支。
- 由于水平所限,以及后续设计的变更,无法在最开始的版本中就写出最优的代码,甚至可能还会存在一些问题,如果遇到你认为不应该这样写的代码请先不要着急。
## zrender
`zrender`是`echarts`使用的`2d`渲染器,意思是,对于`2d`图表,`echarts`更多的是对于数据的处理,将数据绘制到`canvas`上这一步是由`zrender`来完成的。
大概流程就是,使用者告诉`echarts`我要画条形图,有十条数据,`echarts`计算出条形图高度和坐标和使用`zrender`在画布上绘制坐标轴和十个矩形。它也是`echarts`唯一的依赖。它是一个轻量的二维绘图引擎,但是实现了很多功能。本文就从实现`zrender`开始作为实现`echarts`的第一步。
## 本篇目标
前文说到,打造`echarts`从打造一个`zrender`开始,但是`zrender`的功能同样很多,不可能一步到位,所以先从最基础的功能开始,而我们的库我给它命名为`XRender`,即无限可能的渲染器。本篇结束后它将实现`zrender`的以下功能。
```javascript
import * as xrender from '../xrender'
let xr = xrender.init('#app')
let circle = new xrender.Circle({
shape: {
cx: 40,
cy: 40,
r: 20
}
})
xr.add(circle)
// 现在画布上有一个半径为20的圆了
```
## 正文
### 模式
首先明确一点,我们根据数据来实现视图。
然后看看我们需要哪些东西来实现我们要的功能。
- 要绘制的元素,如圆、长方形, 即`Element`,为了和`html`中区分,暂命名为`XElement`。
- 因为会有多个元素,我们需要对其进行增查删改等管理,类似于`3d`游戏开发中常见的`scene`(场景),这里叫做`Stage`,舞台。`zrender`中叫做`Storage`。都差不多。
- 需要将舞台上的元素绘制到画布上,叫做`Paniter`。
- 最终需要将上面的三者关联起来,即`XRender`。
也就是`MV`模式。
考虑到会有多种图形,所以`xrender`最终导出的是一个命名空间,遵循`zrender`的设计,并不向外暴露`XRender`类。那么接下来就可以开始写代码了。
### 环境搭建
为了方便,我使用了`vue-cli`搭建环境,你也可以用其它方式,只要能支持出现的语法就行。接着创建`xrender`目录。或者克隆仓库一键安装。根据上面列出的类,创建如下文件。
``` sh
index.js # 外部引用的入口
Painter.js
Stage.js
XElement.js
XRender.js
```
但是需要做一点小小的修正,因为`XElement`应该是一个抽象类,它只代表一个元素,它本身不提供任何绘制方法,提供绘制方法的应该是继承它的圆`Circle`类。所以修改后的目录如下。
``` sh
│ index.js
│ Painter.js
│ Stage.js
│ XRender.js
│
└─xElements
Circle.js
XElement.js
```
接着在每个文件内创建对应的类,并让构造函数打印出当前类的名称,然后导出,以便搭建整体架构。如:
```javascript
class Stage {
constructor () {
console.log('Stage')
}
}
export default Stage
```
然后编写`index.js`
```javascript
import XRender from './XRender'
// 导出具体的元素类
export { default as Circle } from './xElements/Circle'
// 只暴露方法而不直接暴露`XRender`类
export function init () {
  return new XRender()
}
```
在使用它之前我们还得为`XRender`类添加`add`方法,尽管现在它什么都没做。
```javascript
// 尽管没有使用,但是需要用它来做类型提示
// 用Flow和ts,或jsdoc等,都嫌麻烦
import XElement from "./xElements/XElement";
class XRender {
/**
*
* @param {XElement} xel
*/
add (xel) {
console.log('add an el')
}
}
```
接下来就可以在`App.vue`中写最开始的代码。如果一切顺利,应该能在控制台上看到
``` sh
XRender
Circle
add an el
```
## 细节填充
在下一步之前,我们可能需要一些辅助函数,比如我们经常会判断某个参数是不是字符串。为此我们创建`util`文件夹来存放辅助函数。
### XElement
图形元素,一个抽象类,它应该帮继承它的类如`Circle`处理好样式之类的选项,`Circle`只需要绘制即可。显然它的构造函数应该接受一个选项作为参数,包括这些:
```typescript
import { merge } from '../util'
/**
* 目前什么都没有
*/
export interface XElementShape {
}
/**
* 颜色
*/
type Color = String | CanvasGradient | CanvasPattern
export interface XElementStyle {
// 先只设定描边颜色和填充
/**
* 填充
*/
fill?: Color
/**
* 描边
*/
stroke?: Color
}
/**
* 元素选项接口
*/
interface XElementOptions {
/**
* 元素类型
*/
type?: string
/**
* 形状
*/
shape?: XElementShape
/**
* 样式
*/
style?: XElementStyle
}
```
接着是对类的设计,对于所有选项,它应该有一个默认值,然后在更新时被覆盖。
```typescript
class XElement {
shape: XElementShape = {}
style: XElementStyle = {}
constructor (opt: XElementOptions) {
this.options = opt
}
/**
* 这一步不在构造函数内进行是因为放在构造函数内的话,会被子类的默认属性声明重写
*/
updateOptions () {
let opt = this.options
if (opt.shape) {
// 这个函数会覆盖第一个参数中原来的值
merge(this.shape, opt.shape)
}
if (opt.style) {
merge(this.style, opt.style)
}
}
}
```
对于一个元素,应该提供一个绘制方法,正如上面所提到的,这由它的子类提供。此外在绘制之前还需要对样式进行处理,绘制之后进行还原。而这就需要一个`canvas`的`context`。这里认为它由外部提供。涉及到的`api`请自行查阅。
```typescript
class XElement {
/**
* 绘制
*/
render (ctx: CanvasRenderingContext2D) {
}
/**
* 绘制之前进行样式的处理
*/
beforeRender (ctx: CanvasRenderingContext2D) {
this.updateOptions()
let style = this.style
ctx.save()
ctx.fillStyle = style.fill
ctx.strokeStyle = style.stroke
ctx.beginPath()
}
/**
* 绘制之后进行还原
*/
afterRender (ctx: CanvasRenderingContext2D) {
ctx.stroke()
ctx.fill()
ctx.restore()
}
/**
* 刷新,这个方法由外部调用
*/
refresh (ctx: CanvasRenderingContext2D) {
this.beforeRender(ctx)
this.render(ctx)
this.afterRender(ctx)
}
```
为什么不在创建它的时候传入`ctx`作为属性的一部分?实际上这完全可行。只是`zrender`这样设计,我也暂时先这么做。可能是为了解耦以及多种`ctx`的需要。
### Circle
基类`XElement`已经初步构造完毕,接下来就来构造`Circle`,我们只需声明它需要哪些配置,并提供绘制方法即可。也就是,如何绘制一个圆。
```typescript
import XElement, { XElementShape } from './XElement'
interface CircleShape extends XElementShape {
/**
* 圆心x坐标
*/
cx: number
/**
* 圆心y坐标
*/
cy: number
/**
* 半径
*/
r: number
}
interface CircleOptions extends XElementOptions {
shape: CircleShape
}
class Circle extends XElement {
name ='circle'
shape: CircleShape = {
cx: 0,
cy: 0,
r: 100
}
constructor (opt: CircleOptions) {
super(opt)
}
render (ctx: CanvasRenderingContext2D) {
let shape = this.shape
ctx.arc(shape.cx, shape.cy, shape.r, 0, Math.PI * 2, true)
}
}
export default Circle
```
来验证一下吧,在`App.vue`中加入如下代码:
```typescript
mounted () {
let canvas = document.querySelector('#canvas') as HTMLCanvasElement
let ctx = canvas.getContext('2d') as CanvasRenderingContext2D
circle.refresh(ctx)
}
```
查看页面,已经有了一个黑色的圆。
### Stage
需要它对元素进行增查删改,很容易写出这样的代码。
```typescript
class Stage {
/**
* 所有元素的集合
*/
xelements: XElement[] = []
constructor () {
console.log('Stage')
}
/**
* 添加元素
* 显然可能会添加多个元素
*/
add (...xelements: XElement[]) {
this.xelements.push(...xelements)
}
/**
* 删除指定元素
*/
delete (xel: XElement) {
let index = this.xelements.indexOf(xel)
if (index > -1) {
this.xelements.splice(index)
}
}
/**
* 获取所有元素
*/
getAll () {
return this.xelements
}
}
```
### Painter
绘画控制器,它将舞台上的元素绘制到画布上,那么创建它时就需要提供一个`Stage`和画布——当然,库的通用做法是也可以提供一个容器,由库来创建画布。
```typescript
/**
* 创建canvas
*/
function createCanvas (dom: string | HTMLCanvasElement | HTMLElement) {
if (isString(dom)) {
dom = document.querySelector(dom as string) as HTMLElement
}
if (dom instanceof HTMLCanvasElement) {
return dom
}
let canvas = document.createElement('canvas');
(<HTMLElement>dom).appendChild(canvas)
return canvas
}
class Painter {
canvas: HTMLCanvasElement
stage: Stage
ctx: CanvasRenderingContext2D
constructor (dom: string | HTMLCanvasElement | HTMLElement, stage: Stage) {
this.canvas = createCanvas(dom)
this.stage = stage
this.ctx = this.canvas.getContext('2d')
}
}
```
它应该实现一个`render`方法,遍历`stage`中的元素进行绘制。
```typescript
render () {
let xelements = this.stage.getAll()
for (let i = 0; i < xelements.length; i += 1) {
xelements[i].refresh(this.ctx)
}
}
```
### XRender
最后一步啦,创建`XRender`将它们关联起来。这很简单。
```typescript
import XElement from './xElements/XElement'
import Stage from './Stage'
import Painter from './Painter'
class XRender {
stage: Stage
painter: Painter
constructor (dom: string | HTMLElement) {
let stage = new Stage()
this.stage = stage
this.painter = new Painter(dom, stage)
}
add (...xelements: XElement[]) {
this.stage.add(...xelements)
this.render()
}
render () {
this.painter.render()
}
}
```
现在去掉之前试验`Circle`的代码,保存之后可以看见,仍然绘制出了一个圆,这说明成功啦!
让我们再多添加几个圆试一下,并传入不同的参数。
```typescript
let xr = xrender.init('#app')
let circle = new xrender.Circle({
shape: {
cx: 40,
cy: 40,
r: 20
}
})
let circle1 = new xrender.Circle({
shape: {
cx: 60,
cy: 60,
r: 20
},
style: {
fill: '#00f'
}
})
let circle2 = new xrender.Circle({
shape: {
cx: 100,
cy: 100,
r: 40
},
style: {
fill: '#0ff',
stroke: '#f00'
}
})
xr.add(circle, circle1, circle2)
```
可以看到屏幕上出现了3个圆。接下来我们再尝试扩展一个矩形。
```typescript
interface RectShape extends XElementShape {
/**
* 左上角x
*/
x: number
/**
* 左上角y
*/
y: number
width: number
height: number
}
interface RectOptions extends XElementOptions {
shape: RectShape
}
class Rect extends XElement {
name ='rect'
shape: RectShape = {
x: 0,
y: 0,
width: 0,
height: 0
}
constructor (opt: RectOptions) {
super(opt)
}
render (ctx: CanvasRenderingContext2D) {
let shape = this.shape
ctx.rect(shape.x, shape.y, shape.width, shape.height)
}
}
```
然后在`App.vue`中添加代码:
```typescript
let rect = new xrender.Rect({
shape: {
x: 120,
y: 120,
width: 40,
height: 40
},
style: {
fill: 'transparent'
}
})
xr.add(rect)
```
可以看到矩形出现了。
## 小结
虽然还有很多问题,比如样式规则不完善,比如多次调用`add`会有不必要的重绘;实现添加圆和矩形这样的功能搞得如此复杂看起来也有点不必要。但是我们已经把基础的框架搭建好了,接下来相信可以逐步完善,最终达成我们想要的效果。
## V2预览
[下个版本](./Version2.md)中除了解决小结中出现的两个问题外,还将实现图形分层的功能,即指定图形的层叠顺序。 | {
"pile_set_name": "Github"
} |
var bool: BOOL____00002 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00003 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00004 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00005 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00007 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00008 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00009 :: is_defined_var :: var_is_introduced;
var bool: BOOL____00012 :: is_defined_var :: var_is_introduced;
var 1..4: end :: output_var;
var 1..4: start :: output_var;
array [1..4] of var 0..1: x :: output_array([1..4]);
constraint array_bool_and([BOOL____00003, BOOL____00004], BOOL____00005);
constraint array_bool_and([BOOL____00007, BOOL____00008], BOOL____00009);
constraint int_eq_reif(x[1], 1, BOOL____00002) :: defines_var(BOOL____00002);
constraint int_eq_reif(x[2], 1, BOOL____00005) :: defines_var(BOOL____00005);
constraint int_eq_reif(x[3], 1, BOOL____00009) :: defines_var(BOOL____00009);
constraint int_eq_reif(x[4], 1, BOOL____00012) :: defines_var(BOOL____00012);
constraint int_le(start, end);
constraint int_le_reif(2, end, BOOL____00004) :: defines_var(BOOL____00004);
constraint int_le_reif(3, end, BOOL____00008) :: defines_var(BOOL____00008);
constraint int_le_reif(4, end, BOOL____00012);
constraint int_le_reif(start, 1, BOOL____00002);
constraint int_le_reif(start, 2, BOOL____00003) :: defines_var(BOOL____00003);
constraint int_le_reif(start, 3, BOOL____00007) :: defines_var(BOOL____00007);
solve satisfy;
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Highcharts Example</title>
<style type="text/css">
#container, #sliders {
min-width: 310px;
max-width: 800px;
margin: 0 auto;
}
#container {
height: 400px;
}
</style>
</head>
<body>
<script src="https://code.jquery.com/jquery-3.1.1.min.js"></script>
<script src="../../code/highcharts.js"></script>
<script src="../../code/highcharts-3d.js"></script>
<script src="../../code/modules/exporting.js"></script>
<div id="container"></div>
<div id="sliders">
<table>
<tr>
<td>Alpha Angle</td>
<td><input id="alpha" type="range" min="0" max="45" value="15"/> <span id="alpha-value" class="value"></span></td>
</tr>
<tr>
<td>Beta Angle</td>
<td><input id="beta" type="range" min="-45" max="45" value="15"/> <span id="beta-value" class="value"></span></td>
</tr>
<tr>
<td>Depth</td>
<td><input id="depth" type="range" min="20" max="100" value="50"/> <span id="depth-value" class="value"></span></td>
</tr>
</table>
</div>
<script type="text/javascript">
// Set up the chart
var chart = new Highcharts.Chart({
chart: {
renderTo: 'container',
type: 'column',
options3d: {
enabled: true,
alpha: 15,
beta: 15,
depth: 50,
viewDistance: 25
}
},
title: {
text: 'Chart rotation demo'
},
subtitle: {
text: 'Test options by dragging the sliders below'
},
plotOptions: {
column: {
depth: 25
}
},
series: [{
data: [29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4]
}]
});
function showValues() {
$('#alpha-value').html(chart.options.chart.options3d.alpha);
$('#beta-value').html(chart.options.chart.options3d.beta);
$('#depth-value').html(chart.options.chart.options3d.depth);
}
// Activate the sliders
$('#sliders input').on('input change', function () {
chart.options.chart.options3d[this.id] = parseFloat(this.value);
showValues();
chart.redraw(false);
});
showValues();
</script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
from django.db import models
class ParentAward(models.Model):
    """Per-award rollup table, keyed one-to-one to an Award row.

    Caches child counts and dollar totals so they need not be re-aggregated
    per request. The ``direct_*`` columns appear to aggregate immediate
    children only while ``rollup_*`` columns cover the full subtree --
    NOTE(review): inferred from the column names; confirm against the loader
    that populates this table.
    """
    # One-to-one link to the award; doubles as this table's primary key.
    award = models.OneToOneField("awards.Award", on_delete=models.CASCADE, primary_key=True)
    generated_unique_award_id = models.TextField(unique=True)
    # Self-referential tree link; NULL for awards with no parent in this table.
    parent_award = models.ForeignKey("self", on_delete=models.CASCADE, db_index=True, blank=True, null=True)
    # Counts and dollar totals (23,2 decimal) for direct children.
    direct_idv_count = models.IntegerField()
    direct_contract_count = models.IntegerField()
    direct_total_obligation = models.DecimalField(max_digits=23, decimal_places=2)
    direct_base_and_all_options_value = models.DecimalField(max_digits=23, decimal_places=2)
    direct_base_exercised_options_val = models.DecimalField(max_digits=23, decimal_places=2)
    # Same measures rolled up across descendants.
    rollup_idv_count = models.IntegerField()
    rollup_contract_count = models.IntegerField()
    rollup_total_obligation = models.DecimalField(max_digits=23, decimal_places=2)
    rollup_base_and_all_options_value = models.DecimalField(max_digits=23, decimal_places=2)
    rollup_base_exercised_options_val = models.DecimalField(max_digits=23, decimal_places=2)
    class Meta:
        managed = True
        db_table = "parent_award"
| {
"pile_set_name": "Github"
} |
//======= Copyright (c) Valve Corporation, All rights reserved. ===============
//
// Purpose: Can be attached to the controller to collide with the balloons
//
//=============================================================================
using UnityEngine;
using System.Collections;
namespace Valve.VR.InteractionSystem
{
//-------------------------------------------------------------------------
	public class BalloonColliders : MonoBehaviour
	{
		// Collider proxy objects that are pinned to this object's rigidbody
		// with FixedJoints while this component is enabled.
		public GameObject[] colliders;
		// Local pose of each collider captured in Awake and re-applied on
		// every enable, so the colliders always start from the same offset.
		private Vector3[] colliderLocalPositions;
		private Quaternion[] colliderLocalRotations;
		// Rigidbody on this object that the FixedJoints connect to.
		private Rigidbody rb;
		//-------------------------------------------------
		// Cache the rigidbody and each collider's local pose; prefix the
		// collider names with this object's name for hierarchy debugging.
		void Awake()
		{
			rb = GetComponent<Rigidbody>();
			colliderLocalPositions = new Vector3[colliders.Length];
			colliderLocalRotations = new Quaternion[colliders.Length];
			for ( int i = 0; i < colliders.Length; ++i )
			{
				colliderLocalPositions[i] = colliders[i].transform.localPosition;
				colliderLocalRotations[i] = colliders[i].transform.localRotation;
				colliders[i].name = gameObject.name + "." + colliders[i].name;
			}
		}
		//-------------------------------------------------
		// Place each collider at its stored offset relative to this object,
		// detach it, and attach it to our rigidbody via an unbreakable joint.
		void OnEnable()
		{
			for ( int i = 0; i < colliders.Length; ++i )
			{
				// Parent temporarily so the stored local pose positions the
				// collider relative to this object, then unparent it.
				colliders[i].transform.SetParent( transform );
				colliders[i].transform.localPosition = colliderLocalPositions[i];
				colliders[i].transform.localRotation = colliderLocalRotations[i];
				colliders[i].transform.SetParent( null );
				FixedJoint fixedJoint = colliders[i].AddComponent<FixedJoint>();
				fixedJoint.connectedBody = rb;
				// Infinity: joint can never break apart from the rigidbody.
				fixedJoint.breakForce = Mathf.Infinity;
				fixedJoint.breakTorque = Mathf.Infinity;
				fixedJoint.enableCollision = false;
				fixedJoint.enablePreprocessing = true;
				colliders[i].SetActive( true );
			}
		}
		//-------------------------------------------------
		// Drop the joints and deactivate the colliders while disabled.
		void OnDisable()
		{
			for ( int i = 0; i < colliders.Length; ++i )
			{
				if ( colliders[i] != null )
				{
					Destroy( colliders[i].GetComponent<FixedJoint>() );
					colliders[i].SetActive( false );
				}
			}
		}
		//-------------------------------------------------
		// The colliders were unparented in OnEnable, so they are not cleaned
		// up with this object's hierarchy -- destroy them explicitly.
		void OnDestroy()
		{
			for ( int i = 0; i < colliders.Length; ++i )
			{
				Destroy( colliders[i] );
			}
		}
	}
}
| {
"pile_set_name": "Github"
} |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from pypower.e2i_field import e2i_field
from pypower.i2e_field import i2e_field
from pypower.i2e_data import i2e_data
"""Enable or disable fixed reserve requirements.
"""
from sys import stderr
from pprint import pprint
from numpy import zeros, ones, arange, Inf, any, flatnonzero as find
from scipy.sparse import eye as speye
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack
from pypower.add_userfcn import add_userfcn
from pypower.remove_userfcn import remove_userfcn
from pypower.ext2int import ext2int
from pypower.int2ext import int2ext
from pypower.idx_gen import RAMP_10, PMAX, GEN_STATUS, GEN_BUS
def toggle_reserves(ppc, on_off):
    """Enable or disable fixed reserve requirements.

    Enables or disables a set of OPF userfcn callbacks to implement
    co-optimization of reserves with fixed zonal reserve requirements.

    These callbacks expect to find a 'reserves' field in the input C{ppc},
    where C{ppc['reserves']} is a dict with the following fields:
      - C{zones}   C{nrz x ng}, C{zone(i, j) = 1}, if gen C{j} belongs
      to zone C{i} 0, otherwise
      - C{req}     C{nrz x 1}, zonal reserve requirement in MW
      - C{cost}    (C{ng} or C{ngr}) C{x 1}, cost of reserves in $/MW
      - C{qty}     (C{ng} or C{ngr}) C{x 1}, max quantity of reserves
      in MW (optional)
    where C{nrz} is the number of reserve zones and C{ngr} is the number of
    generators belonging to at least one reserve zone and C{ng} is the total
    number of generators.

    The 'int2ext' callback also packages up results and stores them in
    the following output fields of C{results['reserves']}:
      - C{R}       - C{ng x 1}, reserves provided by each gen in MW
      - C{Rmin}    - C{ng x 1}, lower limit on reserves provided by
      each gen, (MW)
      - C{Rmax}    - C{ng x 1}, upper limit on reserves provided by
      each gen, (MW)
      - C{mu.l}    - C{ng x 1}, shadow price on reserve lower limit, ($/MW)
      - C{mu.u}    - C{ng x 1}, shadow price on reserve upper limit, ($/MW)
      - C{mu.Pmax} - C{ng x 1}, shadow price on C{Pg + R <= Pmax}
      constraint, ($/MW)
      - C{prc}     - C{ng x 1}, reserve price for each gen equal to
      maximum of the shadow prices on the zonal requirement constraint
      for each zone the generator belongs to

    @see: L{runopf_w_res}, L{add_userfcn}, L{remove_userfcn}, L{run_userfcn},
    L{t.t_case30_userfcns}
    @author: Ray Zimmerman (PSERC Cornell)
    """
    if on_off == 'on':
        ## check for proper reserve inputs
        ## NOTE: these checks must short-circuit ('or', not the bitwise '|'
        ## used previously): '|' evaluates every operand eagerly, so a ppc
        ## without a 'reserves' key raised KeyError on the subscripts below
        ## instead of reporting the actual problem
        if ('reserves' not in ppc) or (not isinstance(ppc['reserves'], dict)) or \
                ('zones' not in ppc['reserves']) or \
                ('req' not in ppc['reserves']) or \
                ('cost' not in ppc['reserves']):
            stderr.write('toggle_reserves: case must contain a \'reserves\' field, a struct defining \'zones\', \'req\' and \'cost\'\n')

        ## add callback functions
        ## note: assumes all necessary data included in 1st arg (ppc, om, results)
        ##       so, no additional explicit args are needed
        ppc = add_userfcn(ppc, 'ext2int', userfcn_reserves_ext2int)
        ppc = add_userfcn(ppc, 'formulation', userfcn_reserves_formulation)
        ppc = add_userfcn(ppc, 'int2ext', userfcn_reserves_int2ext)
        ppc = add_userfcn(ppc, 'printpf', userfcn_reserves_printpf)
        ppc = add_userfcn(ppc, 'savecase', userfcn_reserves_savecase)
    elif on_off == 'off':
        ## remove in the reverse order they were added
        ppc = remove_userfcn(ppc, 'savecase', userfcn_reserves_savecase)
        ppc = remove_userfcn(ppc, 'printpf', userfcn_reserves_printpf)
        ppc = remove_userfcn(ppc, 'int2ext', userfcn_reserves_int2ext)
        ppc = remove_userfcn(ppc, 'formulation', userfcn_reserves_formulation)
        ppc = remove_userfcn(ppc, 'ext2int', userfcn_reserves_ext2int)
    else:
        ## the previous message used MATLAB-style doubled quotes, which in
        ## Python collapse via implicit string concatenation and silently
        ## drop the quotes around on/off
        stderr.write("toggle_reserves: 2nd argument must be either 'on' or 'off'")

    return ppc
def userfcn_reserves_ext2int(ppc, *args):
    """This is the 'ext2int' stage userfcn callback that prepares the input
    data for the formulation stage. It expects to find a 'reserves' field
    in ppc as described above. The optional args are not currently used.

    Converts all reserve parameters to internal (on-line gens only)
    indexing, expands ngr-length cost/qty vectors to full gen length, and
    records the 'rgens' mask and 'igr' index list used by later callbacks.
    """
    ## initialize some things
    r = ppc['reserves']
    o = ppc['order']
    ng0 = o['ext']['gen'].shape[0] ## number of original gens (+ disp loads)
    nrz = r['req'].shape[0] ## number of reserve zones
    if nrz > 1:
        ## numpy.any along axis 0: a gen qualifies if it is in any zone
        ppc['reserves']['rgens'] = any(r['zones'], 0) ## mask of gens available to provide reserves
    else:
        ppc['reserves']['rgens'] = r['zones']
    igr = find(ppc['reserves']['rgens']) ## indices of gens available to provide reserves
    ngr = len(igr) ## number of gens available to provide reserves
    ## check data for consistent dimensions
    ## NOTE(review): inconsistencies are only reported to stderr here;
    ## execution continues with the bad data
    if r['zones'].shape[0] != nrz:
        stderr.write('userfcn_reserves_ext2int: the number of rows in ppc[\'reserves\'][\'req\'] (%d) and ppc[\'reserves\'][\'zones\'] (%d) must match\n' % (nrz, r['zones'].shape[0]))
    if (r['cost'].shape[0] != ng0) & (r['cost'].shape[0] != ngr):
        stderr.write('userfcn_reserves_ext2int: the number of rows in ppc[\'reserves\'][\'cost\'] (%d) must equal the total number of generators (%d) or the number of generators able to provide reserves (%d)\n' % (r['cost'].shape[0], ng0, ngr))
    if 'qty' in r:
        if r['qty'].shape[0] != r['cost'].shape[0]:
            stderr.write('userfcn_reserves_ext2int: ppc[\'reserves\'][\'cost\'] (%d x 1) and ppc[\'reserves\'][\'qty\'] (%d x 1) must be the same dimension\n' % (r['cost'].shape[0], r['qty'].shape[0]))
    ## convert both cost and qty from ngr x 1 to full ng x 1 vectors if necessary
    ## (the ngr-length originals are stashed under 'original' so int2ext can
    ## restore them on the way back out)
    if r['cost'].shape[0] < ng0:
        if 'original' not in ppc['reserves']:
            ppc['reserves']['original'] = {}
        ppc['reserves']['original']['cost'] = r['cost'].copy() ## save original
        cost = zeros(ng0)
        cost[igr] = r['cost']
        ppc['reserves']['cost'] = cost
        if 'qty' in r:
            ppc['reserves']['original']['qty'] = r['qty'].copy() ## save original
            qty = zeros(ng0)
            qty[igr] = r['qty']
            ppc['reserves']['qty'] = qty
    ##----- convert stuff to internal indexing -----
    ## convert all reserve parameters (zones, costs, qty, rgens)
    if 'qty' in r:
        ppc = e2i_field(ppc, ['reserves', 'qty'], 'gen')
    ppc = e2i_field(ppc, ['reserves', 'cost'], 'gen')
    ppc = e2i_field(ppc, ['reserves', 'zones'], 'gen', 1)
    ppc = e2i_field(ppc, ['reserves', 'rgens'], 'gen', 1)
    ## save indices of gens available to provide reserves
    ppc['order']['ext']['reserves']['igr'] = igr ## external indexing
    ppc['reserves']['igr'] = find(ppc['reserves']['rgens']) ## internal indexing
    return ppc
def userfcn_reserves_formulation(om, *args):
    """This is the 'formulation' stage userfcn callback that defines the
    user costs and constraints for fixed reserves. It expects to find
    a 'reserves' field in the ppc stored in om, as described above.
    By the time it is passed to this callback, ppc['reserves'] should
    have two additional fields:
      - C{igr}     C{1 x ngr}, indices of generators available for reserves
      - C{rgens}   C{1 x ng}, 1 if gen available for reserves, 0 otherwise
    It is also assumed that if cost or qty were C{ngr x 1}, they have been
    expanded to C{ng x 1} and that everything has been converted to
    internal indexing, i.e. all gens are on-line (by the 'ext2int'
    callback). The optional args are not currently used.
    """
    ## initialize some things
    ppc = om.get_ppc()
    r = ppc['reserves']
    igr = r['igr'] ## indices of gens available to provide reserves
    ngr = len(igr) ## number of gens available to provide reserves
    ng = ppc['gen'].shape[0] ## number of on-line gens (+ disp loads)
    ## variable bounds
    Rmin = zeros(ngr) ## bound below by 0
    Rmax = Inf * ones(ngr) ## bound above by ...
    k = find(ppc['gen'][igr, RAMP_10])
    Rmax[k] = ppc['gen'][igr[k], RAMP_10] ## ... ramp rate and ...
    if 'qty' in r:
        k = find(r['qty'][igr] < Rmax)
        Rmax[k] = r['qty'][igr[k]] ## ... stated max reserve qty
    ## convert bound to per unit on the system MVA base
    Rmax = Rmax / ppc['baseMVA']
    ## constraints
    I = speye(ngr, ngr, format='csr') ## identity matrix
    ## Ar * [Pg; R] = Pg[igr] + R, so 'Pg_plus_R' below enforces
    ## Pg + R <= Pmax (all in p.u.)
    Ar = hstack([sparse((ones(ngr), (arange(ngr), igr)), (ngr, ng)), I], 'csr')
    ur = ppc['gen'][igr, PMAX] / ppc['baseMVA']
    lreq = r['req'] / ppc['baseMVA']
    ## cost
    Cw = r['cost'][igr] * ppc['baseMVA'] ## per unit cost coefficients
    ## add them to the model
    ## 'Rreq' is lower-bounded only: zones * R >= req (p.u.)
    om.add_vars('R', ngr, [], Rmin, Rmax)
    om.add_constraints('Pg_plus_R', Ar, [], ur, ['Pg', 'R'])
    om.add_constraints('Rreq', sparse( r['zones'][:, igr] ), lreq, [], ['R'])
    om.add_costs('Rcost', {'N': I, 'Cw': Cw}, ['R'])
    return om
def userfcn_reserves_int2ext(results, *args):
    """This is the 'int2ext' stage userfcn callback that converts everything
    back to external indexing and packages up the results. It expects to
    find a 'reserves' field in the results struct as described for ppc
    above, including the two additional fields 'igr' and 'rgens'. It also
    expects the results to contain a variable 'R' and linear constraints
    'Pg_plus_R' and 'Rreq' which are used to populate output fields in
    results.reserves. The optional args are not currently used.
    """
    ## initialize some things
    r = results['reserves']
    ## grab some info in internal indexing order
    igr = r['igr'] ## indices of gens available to provide reserves
    ng = results['gen'].shape[0] ## number of on-line gens (+ disp loads)
    ##----- convert stuff back to external indexing -----
    ## convert all reserve parameters (zones, costs, qty, rgens)
    if 'qty' in r:
        results = i2e_field(results, ['reserves', 'qty'], ordering='gen')
    results = i2e_field(results, ['reserves', 'cost'], ordering='gen')
    results = i2e_field(results, ['reserves', 'zones'], ordering='gen', dim=1)
    results = i2e_field(results, ['reserves', 'rgens'], ordering='gen', dim=1)
    results['order']['int']['reserves']['igr'] = results['reserves']['igr'] ## save internal version
    results['reserves']['igr'] = results['order']['ext']['reserves']['igr'] ## use external version
    r = results['reserves'] ## update
    o = results['order'] ## update
    ## grab same info in external indexing order
    igr0 = r['igr'] ## indices of gens available to provide reserves
    ng0 = o['ext']['gen'].shape[0] ## number of gens (+ disp loads)
    ##----- results post-processing -----
    ## get the results (per gen reserves, multipliers) with internal gen indexing
    ## and convert from p.u. to per MW units
    _, Rl, Ru = results['om'].getv('R')
    R = zeros(ng)
    Rmin = zeros(ng)
    Rmax = zeros(ng)
    mu_l = zeros(ng)
    mu_u = zeros(ng)
    mu_Pmax = zeros(ng)
    R[igr] = results['var']['val']['R'] * results['baseMVA']
    Rmin[igr] = Rl * results['baseMVA']
    Rmax[igr] = Ru * results['baseMVA']
    mu_l[igr] = results['var']['mu']['l']['R'] / results['baseMVA']
    mu_u[igr] = results['var']['mu']['u']['R'] / results['baseMVA']
    mu_Pmax[igr] = results['lin']['mu']['u']['Pg_plus_R'] / results['baseMVA']
    ## store in results in results struct
    ## i2e_data pads the internal vectors back out to external size using z
    ## (zeros) for off-line gens
    z = zeros(ng0)
    results['reserves']['R'] = i2e_data(results, R, z, 'gen')
    results['reserves']['Rmin'] = i2e_data(results, Rmin, z, 'gen')
    results['reserves']['Rmax'] = i2e_data(results, Rmax, z, 'gen')
    if 'mu' not in results['reserves']:
        results['reserves']['mu'] = {}
    results['reserves']['mu']['l'] = i2e_data(results, mu_l, z, 'gen')
    results['reserves']['mu']['u'] = i2e_data(results, mu_u, z, 'gen')
    results['reserves']['mu']['Pmax'] = i2e_data(results, mu_Pmax, z, 'gen')
    ## per-gen reserve price from the zonal requirement constraint multipliers
    ## NOTE(review): this SUMS the shadow prices over the gen's zones while
    ## the toggle_reserves docstring says "maximum" -- confirm which is intended
    results['reserves']['prc'] = z
    for k in igr0:
        iz = find(r['zones'][:, k])
        results['reserves']['prc'][k] = sum(results['lin']['mu']['l']['Rreq'][iz]) / results['baseMVA']
    results['reserves']['totalcost'] = results['cost']['Rcost']
    ## replace ng x 1 cost, qty with ngr x 1 originals
    if 'original' in r:
        if 'qty' in r:
            results['reserves']['qty'] = r['original']['qty']
        results['reserves']['cost'] = r['original']['cost']
        del results['reserves']['original']
    return results
def userfcn_reserves_printpf(results, fd, ppopt, *args):
    """This is the 'printpf' stage userfcn callback that pretty-prints the
    results. It expects a C{results} dict, a file descriptor and a PYPOWER
    options vector. The optional args are not currently used.

    Writes three tables to fd when output is not suppressed: per-gen
    reserves/prices, per-zone totals, and per-gen reserve limits.
    """
    ##----- print results -----
    r = results['reserves']
    nrz = r['req'].shape[0]
    OUT_ALL = ppopt['OUT_ALL']
    if OUT_ALL != 0:
        ## table 1: per-generator reserves and prices
        fd.write('\n================================================================================')
        fd.write('\n| Reserves |')
        fd.write('\n================================================================================')
        fd.write('\n Gen Bus Status Reserves Price')
        fd.write('\n # # (MW) ($/MW) Included in Zones ...')
        fd.write('\n---- ----- ------ -------- -------- ------------------------')
        for k in r['igr']:
            iz = find(r['zones'][:, k])
            fd.write('\n%3d %6d %2d ' % (k, results['gen'][k, GEN_BUS], results['gen'][k, GEN_STATUS]))
            ## a dash is printed for off-line gens or negligible reserves
            if (results['gen'][k, GEN_STATUS] > 0) & (abs(results['reserves']['R'][k]) > 1e-6):
                fd.write('%10.2f' % results['reserves']['R'][k])
            else:
                fd.write(' - ')
            fd.write('%10.2f ' % results['reserves']['prc'][k])
            for i in range(len(iz)):
                if i != 0:
                    fd.write(', ')
                fd.write('%d' % iz[i])
        fd.write('\n --------')
        fd.write('\n Total:%10.2f Total Cost: $%.2f' %
            (sum(results['reserves']['R'][r['igr']]), results['reserves']['totalcost']))
        fd.write('\n')
        ## table 2: per-zone reserve totals and zonal prices
        fd.write('\nZone Reserves Price ')
        fd.write('\n # (MW) ($/MW) ')
        fd.write('\n---- -------- --------')
        for k in range(nrz):
            iz = find(r['zones'][k, :]) ## gens in zone k
            fd.write('\n%3d%10.2f%10.2f' % (k, sum(results['reserves']['R'][iz]),
                results['lin']['mu']['l']['Rreq'][k] / results['baseMVA']))
        fd.write('\n')
        ## table 3: per-generator reserve limits and binding multipliers
        fd.write('\n================================================================================')
        fd.write('\n| Reserve Limits |')
        fd.write('\n================================================================================')
        fd.write('\n Gen Bus Status Rmin mu Rmin Reserves Rmax Rmax mu Pmax mu ')
        fd.write('\n # # ($/MW) (MW) (MW) (MW) ($/MW) ($/MW) ')
        fd.write('\n---- ----- ------ -------- -------- -------- -------- -------- --------')
        for k in r['igr']:
            fd.write('\n%3d %6d %2d ' % (k, results['gen'][k, GEN_BUS], results['gen'][k, GEN_STATUS]))
            ## multipliers below 1e-6 are treated as non-binding and dashed out
            if (results['gen'][k, GEN_STATUS] > 0) & (results['reserves']['mu']['l'][k] > 1e-6):
                fd.write('%10.2f' % results['reserves']['mu']['l'][k])
            else:
                fd.write(' - ')
            fd.write('%10.2f' % results['reserves']['Rmin'][k])
            if (results['gen'][k, GEN_STATUS] > 0) & (abs(results['reserves']['R'][k]) > 1e-6):
                fd.write('%10.2f' % results['reserves']['R'][k])
            else:
                fd.write(' - ')
            fd.write('%10.2f' % results['reserves']['Rmax'][k])
            if (results['gen'][k, GEN_STATUS] > 0) & (results['reserves']['mu']['u'][k] > 1e-6):
                fd.write('%10.2f' % results['reserves']['mu']['u'][k])
            else:
                fd.write(' - ')
            if (results['gen'][k, GEN_STATUS] > 0) & (results['reserves']['mu']['Pmax'][k] > 1e-6):
                fd.write('%10.2f' % results['reserves']['mu']['Pmax'][k])
            else:
                fd.write(' - ')
        fd.write('\n --------')
        fd.write('\n Total:%10.2f' % sum(results['reserves']['R'][r['igr']]))
        fd.write('\n')
    return results
def userfcn_reserves_savecase(ppc, fd, prefix, *args):
    """This is the 'savecase' stage userfcn callback that prints the Python
    file code to save the 'reserves' field in the case file. It expects a
    PYPOWER case dict (ppc), a file descriptor and variable prefix
    (usually 'ppc'). The optional args are not currently used.

    Returns the (unmodified) ppc.

    Fixes vs. the previous version:
      - ``fd.write(template, r.zones.T)`` always raised: file.write() takes
        a single argument and ``r`` is a dict, not an object with a
        ``zones`` attribute. Rows are now formatted and written one by one.
      - ``'%g' % r['req'][1:]`` (and cost/qty) failed whenever the slice had
        more than one element; each element is now formatted individually.
      - solved values used ``pprint()``, which prints to stdout and returns
        None, so "None" was written to the case file; ``pformat()`` is used
        instead to embed the formatted text.
    """
    ## pformat returns the formatted string (pprint prints and returns None)
    from pprint import pformat

    def write_vector(name, vec, comments):
        """Write 1-d vector ``vec`` as '<prefix>reserves.<name> = [ ... ];'."""
        for line in comments:
            fd.write(line)
        fd.write('%sreserves.%s = [\t%g' % (prefix, name, vec[0]))
        for v in vec[1:]:
            fd.write(';\t%g' % v)
        fd.write('\t];\n')

    r = ppc['reserves']

    fd.write('\n####----- Reserve Data -----####\n')
    fd.write('#### reserve zones, element i, j is 1 if gen j is in zone i, 0 otherwise\n')
    fd.write('%sreserves.zones = [\n' % prefix)
    ## one '\t%d' slot per generator column; one output row per reserve zone
    template = '\t%d' * r['zones'].shape[1] + ';\n'
    for row in r['zones']:
        fd.write(template % tuple(row))
    fd.write('];\n')

    write_vector('req', r['req'], [
        '\n#### reserve requirements for each zone in MW\n'])

    write_vector('cost', r['cost'], [
        '\n#### reserve costs in $/MW for each gen that belongs to at least 1 zone\n',
        '#### (same order as gens, but skipping any gen that does not belong to any zone)\n'])

    if 'qty' in r:
        write_vector('qty', r['qty'], [
            '\n#### OPTIONAL max reserve quantities for each gen that belongs to at least 1 zone\n',
            '#### (same order as gens, but skipping any gen that does not belong to any zone)\n'])

    ## save output fields for solved case
    if 'R' in r:
        fd.write('\n#### solved values\n')
        fd.write('%sreserves.R = %s\n' % (prefix, pformat(r['R'])))
        fd.write('%sreserves.Rmin = %s\n' % (prefix, pformat(r['Rmin'])))
        fd.write('%sreserves.Rmax = %s\n' % (prefix, pformat(r['Rmax'])))
        fd.write('%sreserves.mu.l = %s\n' % (prefix, pformat(r['mu']['l'])))
        fd.write('%sreserves.mu.u = %s\n' % (prefix, pformat(r['mu']['u'])))
        fd.write('%sreserves.prc = %s\n' % (prefix, pformat(r['prc'])))
        fd.write('%sreserves.totalcost = %s\n' % (prefix, pformat(r['totalcost'])))

    return ppc
| {
"pile_set_name": "Github"
} |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.flink.runtime.operators.coordination;
import java.util.ArrayList;
import java.util.List;
/**
* A mock {@link OperatorEventGateway} for unit tests.
*/
public class MockOperatorEventGateway implements OperatorEventGateway {

    /** Records every event handed to {@link #sendEventToCoordinator(OperatorEvent)}. */
    private final List<OperatorEvent> recordedEvents = new ArrayList<>();

    @Override
    public void sendEventToCoordinator(OperatorEvent event) {
        recordedEvents.add(event);
    }

    /** Returns all events sent through this gateway, in sending order. */
    public List<OperatorEvent> getEventsSent() {
        return recordedEvents;
    }
}
| {
"pile_set_name": "Github"
} |
# Exim test configuration 5730
# OCSP stapling, client, events
SERVER =
exim_path = EXIM_PATH
keep_environment = ^EXIM_TESTHARNESS_DISABLE_[O]CSPVALIDITYCHECK$
host_lookup_order = bydns
spool_directory = DIR/spool
log_file_path = DIR/spool/log/SERVER%slog
gecos_pattern = ""
gecos_name = CALLER_NAME
chunking_advertise_hosts =
primary_hostname = server1.example.com
.ifdef _HAVE_DMARC
dmarc_tld_file =
.endif
# ----- Main settings -----
domainlist local_domains = test.ex : *.test.ex
acl_smtp_rcpt = check_recipient
acl_smtp_data = check_data
log_selector = +tls_peerdn
remote_max_parallel = 1
tls_advertise_hosts = *
# Set certificate only if server
tls_certificate = ${if eq {SERVER}{server}\
{DIR/aux-fixed/exim-ca/example.com/server1.example.com/server1.example.com.chain.pem}\
fail\
}
tls_privatekey = ${if eq {SERVER}{server}\
{DIR/aux-fixed/exim-ca/example.com/server1.example.com/server1.example.com.unlocked.key}\
fail}
# from cmdline define
tls_ocsp_file = OPT
# ------ ACL ------
begin acl
check_recipient:
accept domains = +local_domains
deny message = relay not permitted
check_data:
warn condition = ${if def:h_X-TLS-out:}
logwrite = client claims: $h_X-TLS-out:
accept
logger:
accept condition = ${if !eq {msg} {${listextract{1}{$event_name}}}}
accept condition = ${if eq {host} {${listextract{2}{$event_name}}}}
warn logwrite = client ocsp status: $tls_out_ocsp \
(${listextract {${eval:$tls_out_ocsp+1}} \
{notreq:notresp:vfynotdone:failed:verified}})
accept
# ----- Routers -----
begin routers
client:
driver = accept
condition = ${if eq {SERVER}{server}{no}{yes}}
retry_use_local_part
transport = send_to_server${if eq{$local_part}{nostaple}{1} \
{${if eq{$local_part}{norequire} {2} \
{${if eq{$local_part}{smtps} {4}{3}}} \
}}}
server:
driver = redirect
data = :blackhole:
#retry_use_local_part
#transport = local_delivery
# ----- Transports -----
begin transports
local_delivery:
driver = appendfile
file = DIR/test-mail/${bless:$local_part}
headers_add = TLS: cipher=$tls_cipher peerdn=$tls_peerdn
user = CALLER
send_to_server1:
driver = smtp
allow_localhost
hosts = HOSTIPV4
port = PORT_D
hosts_try_fastopen = :
tls_verify_certificates = DIR/aux-fixed/exim-ca/example.com/CA/CA.pem
tls_verify_cert_hostnames =
hosts_require_tls = *
hosts_request_ocsp = :
headers_add = X-TLS-out: OCSP status $tls_out_ocsp \
(${listextract {${eval:$tls_out_ocsp+1}} \
{notreq:notresp:vfynotdone:failed:verified}})
event_action = ${acl {logger}}
send_to_server2:
driver = smtp
allow_localhost
hosts = HOSTIPV4
port = PORT_D
hosts_try_fastopen = :
tls_verify_certificates = DIR/aux-fixed/exim-ca/example.com/CA/CA.pem
tls_verify_cert_hostnames =
hosts_require_tls = *
# note no ocsp mention here
headers_add = X-TLS-out: OCSP status $tls_out_ocsp \
(${listextract {${eval:$tls_out_ocsp+1}} \
{notreq:notresp:vfynotdone:failed:verified}})
event_action = ${acl {logger}}
send_to_server3:
driver = smtp
allow_localhost
hosts = 127.0.0.1
port = PORT_D
hosts_try_fastopen = :
helo_data = helo.data.changed
#tls_verify_certificates = DIR/aux-fixed/exim-ca/example.com/server1.example.com/ca_chain.pem
tls_verify_certificates = DIR/aux-fixed/exim-ca/example.com/CA/CA.pem
tls_try_verify_hosts =
tls_verify_cert_hostnames =
hosts_require_tls = *
hosts_require_ocsp = *
headers_add = X-TLS-out: OCSP status $tls_out_ocsp \
(${listextract {${eval:$tls_out_ocsp+1}} \
{notreq:notresp:vfynotdone:failed:verified}})
event_action = ${acl {logger}}
send_to_server4:
driver = smtp
allow_localhost
hosts = 127.0.0.1
port = PORT_D
hosts_try_fastopen = :
helo_data = helo.data.changed
#tls_verify_certificates = DIR/aux-fixed/exim-ca/example.com/server1.example.com/ca_chain.pem
tls_verify_certificates = DIR/aux-fixed/exim-ca/example.com/CA/CA.pem
tls_verify_cert_hostnames =
protocol = smtps
hosts_require_tls = *
hosts_require_ocsp = *
headers_add = X-TLS-out: OCSP status $tls_out_ocsp \
(${listextract {${eval:$tls_out_ocsp+1}} \
{notreq:notresp:vfynotdone:failed:verified}})
event_action = ${acl {logger}}
# ----- Retry -----
begin retry
* * F,5d,1s
# End
| {
"pile_set_name": "Github"
} |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
)
// init pre-computes the sentinel byte slices used by ShortenConfig. The
// strings are base64-DECODED here so that when the JSON encoder later
// base64-ENCODES the []byte fields, the output reads as the human-readable
// markers (see the comment inside ShortenConfig).
// NOTE(review): "REDACTED+" has length 9, which is not a multiple of 4, so
// DecodeString returns an error (ignored) along with the decoded prefix --
// confirm the partial decode is the intended behavior.
func init() {
	sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
	redactedBytes = []byte(string(sDec))
	sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED")
	dataOmittedBytes = []byte(string(sDec))
}
// IsConfigEmpty returns true if the config is empty.
func IsConfigEmpty(config *Config) bool {
	switch {
	case len(config.AuthInfos) != 0,
		len(config.Clusters) != 0,
		len(config.Contexts) != 0,
		len(config.CurrentContext) != 0,
		len(config.Preferences.Extensions) != 0,
		config.Preferences.Colors,
		len(config.Extensions) != 0:
		// any populated section means the config is not empty
		return false
	}
	return true
}
// MinifyConfig read the current context and uses that to keep only the relevant pieces of config
// This is useful for making secrets based on kubeconfig files
func MinifyConfig(config *Config) error {
	if len(config.CurrentContext) == 0 {
		return errors.New("current-context must exist in order to minify")
	}

	currContext, exists := config.Contexts[config.CurrentContext]
	if !exists {
		return fmt.Errorf("cannot locate context %v", config.CurrentContext)
	}

	// Keep only the current context itself.
	minifiedContexts := map[string]*Context{config.CurrentContext: currContext}

	// Keep only the cluster the current context points at (if any).
	minifiedClusters := map[string]*Cluster{}
	if name := currContext.Cluster; len(name) > 0 {
		cluster, ok := config.Clusters[name]
		if !ok {
			return fmt.Errorf("cannot locate cluster %v", name)
		}
		minifiedClusters[name] = cluster
	}

	// Keep only the user the current context points at (if any).
	minifiedAuthInfos := map[string]*AuthInfo{}
	if name := currContext.AuthInfo; len(name) > 0 {
		authInfo, ok := config.AuthInfos[name]
		if !ok {
			return fmt.Errorf("cannot locate user %v", name)
		}
		minifiedAuthInfos[name] = authInfo
	}

	config.AuthInfos = minifiedAuthInfos
	config.Clusters = minifiedClusters
	config.Contexts = minifiedContexts

	return nil
}
var (
	// redactedBytes re-encodes (via encoding/json's base64 of []byte) to a
	// "REDACTED"-style marker; populated in init.
	redactedBytes []byte
	// dataOmittedBytes re-encodes to "DATA+OMITTED"; populated in init.
	dataOmittedBytes []byte
)
// ShortenConfig redacts raw data entries from the config object for a human-readable view.
// It modifies config in place.
func ShortenConfig(config *Config) {
	// trick json encoder into printing a human readable string in the raw data
	// by base64 decoding what we want to print. Relies on implementation of
	// http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
	for key, authInfo := range config.AuthInfos {
		// replace client credential material with the REDACTED marker
		if len(authInfo.ClientKeyData) > 0 {
			authInfo.ClientKeyData = redactedBytes
		}
		if len(authInfo.ClientCertificateData) > 0 {
			authInfo.ClientCertificateData = redactedBytes
		}
		config.AuthInfos[key] = authInfo
	}
	for key, cluster := range config.Clusters {
		// replace CA bundles with the DATA+OMITTED marker
		if len(cluster.CertificateAuthorityData) > 0 {
			cluster.CertificateAuthorityData = dataOmittedBytes
		}
		config.Clusters[key] = cluster
	}
}
// FlattenConfig changes the config object into a self contained config (useful for making secrets)
func FlattenConfig(config *Config) error {
	// Inline every file-referenced credential for each user entry. Relative
	// paths are resolved against the directory the entry was loaded from.
	for name, authInfo := range config.AuthInfos {
		dir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
		if err != nil {
			return err
		}
		if err = FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, dir); err != nil {
			return err
		}
		if err = FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, dir); err != nil {
			return err
		}
		config.AuthInfos[name] = authInfo
	}

	// Same treatment for each cluster's CA file.
	for name, cluster := range config.Clusters {
		dir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
		if err != nil {
			return err
		}
		if err = FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, dir); err != nil {
			return err
		}
		config.Clusters[name] = cluster
	}

	return nil
}
func FlattenContent(path *string, contents *[]byte, baseDir string) error {
if len(*path) != 0 {
if len(*contents) > 0 {
return errors.New("cannot have values for both path and contents")
}
var err error
absPath := ResolvePath(*path, baseDir)
*contents, err = ioutil.ReadFile(absPath)
if err != nil {
return err
}
*path = ""
}
return nil
}
// ResolvePath returns the path as an absolute paths, relative to the given base directory
func ResolvePath(path string, base string) string {
// Don't resolve empty paths
if len(path) > 0 {
// Don't resolve absolute paths
if !filepath.IsAbs(path) {
return filepath.Join(base, path)
}
}
return path
}
func MakeAbs(path, base string) (string, error) {
if filepath.IsAbs(path) {
return path, nil
}
if len(base) == 0 {
cwd, err := os.Getwd()
if err != nil {
return "", err
}
base = cwd
}
return filepath.Join(base, path), nil
}
| {
"pile_set_name": "Github"
} |
package com.azhon.mvvm.linkage;
import android.os.Bundle;
import android.widget.SeekBar;
import androidx.lifecycle.ViewModelProviders;
import com.azhon.basic.base.BaseFragment;
import com.azhon.mvvm.R;
import com.azhon.mvvm.databinding.FragmentLinkageBinding;
/**
 * Project:     TODO-MVVM
 * Package:     com.azhon.mvvm.linkage
 * File:        LinkageFragment
 * Created:     2019-03-29 20:24
 * Description: Example of DataBinding {@code #setLifecycleOwner()} usage.
 *
 * @author azhon
 */
public class LinkageFragment extends BaseFragment<LinkageViewModel, FragmentLinkageBinding>
        implements SeekBar.OnSeekBarChangeListener {

    /** Builds a new fragment instance with an (empty) arguments bundle. */
    public static LinkageFragment newInstance() {
        Bundle args = new Bundle();
        LinkageFragment fragment = new LinkageFragment();
        fragment.setArguments(args);
        return fragment;
    }

    /** @return the layout resource this fragment inflates */
    @Override
    protected int onCreate() {
        return R.layout.fragment_linkage;
    }

    /** Registers this fragment as the SeekBar change listener. */
    @Override
    protected void initView() {
        dataBinding.skII.setOnSeekBarChangeListener(this);
    }

    @Override
    protected void initData() {
        dataBinding.setModel(viewModel);
        // Let the binding observe LiveData in the ViewModel: when the LiveData
        // is updated, the bound layout refreshes automatically.
        dataBinding.setLifecycleOwner(this);
    }

    /** Obtains the ViewModel scoped to the host activity. */
    @Override
    protected LinkageViewModel initViewModel() {
        return ViewModelProviders.of(getActivity()).get(LinkageViewModel.class);
    }

    // No error UI in this sample.
    @Override
    protected void showError(Object obj) {
    }

    /** Pushes the SeekBar progress into the ViewModel's LiveData. */
    @Override
    public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
        viewModel.getProgress().setValue(progress);
    }

    @Override
    public void onStartTrackingTouch(SeekBar seekBar) {
    }

    @Override
    public void onStopTrackingTouch(SeekBar seekBar) {
    }
}
| {
"pile_set_name": "Github"
} |
---
external help file: Microsoft.CertificateServices.Administration.Commands.dll-Help.xml
Module Name: ADCSAdministration
online version:
schema: 2.0.0
title: Add-CATemplate
ms.author: v-anbarr
ms.reviewer: brianlic
description:
keywords: powershell, cmdlet
author: andreabarr
manager: jasgro
ms.date: 2017-10-30
ms.topic: reference
ms.prod: powershell
ms.technology: powershell
ms.assetid: C8A5382D-23F3-4DDE-9661-EAF69E65B2AB
---
# Add-CATemplate
## SYNOPSIS
Adds a certificate template to the CA.
## SYNTAX
```
Add-CATemplate [-Name] <String> [-Force] [-WhatIf] [-Confirm] [<CommonParameters>]
```
## DESCRIPTION
The Add-CATemplate cmdlet adds a certificate template to the CA for issuing.
A certificate template is a preconfigured list of certificate settings that allows users and computers to enroll for certificates without having to create complex certificate requests.
Certificate templates allow for the customization of a certificate that can be issued by the CA.
The template defines items such as the cryptographic types, validity and renewal periods, and certificate purposes.
The certificate templates are stored in Active Directory Domain Services (AD DS).
Many default certificate templates are added to AD DS when the CA role service is installed.
The Add-CATemplate cmdlet does not allow you to create new templates or duplicate existing templates.
## EXAMPLES
### -------------------------- EXAMPLE 1 --------------------------
```
C:\PS>Add-CATemplate -Name EFS
```
Description
-----------
Adds a CA template with the template display name Basic EFS and the template name EFS.
## PARAMETERS
### -Confirm
Prompts you for confirmation before running the cmdlet.
```yaml
Type: SwitchParameter
Parameter Sets: (All)
Aliases: cf
Required: False
Position: Named
Default value: False
Accept pipeline input: False
Accept wildcard characters: False
```
### -Force
Forces the command to run without asking for user confirmation.
```yaml
Type: SwitchParameter
Parameter Sets: (All)
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -Name
Specifies the name of a certificate template name.
This name must always be the template name (short name without spaces) and not the template display name.
For example, the certificate template with the template display name of Exchange Enrollment Agent (Offline request) must be specified by its template name, which is EnrollmentAgentOffline.
```yaml
Type: String
Parameter Sets: (All)
Aliases:
Required: True
Position: 1
Default value: None
Accept pipeline input: True (ByPropertyName, ByValue)
Accept wildcard characters: False
```
### -WhatIf
Shows what would happen if the cmdlet runs.
The cmdlet is not run.
```yaml
Type: SwitchParameter
Parameter Sets: (All)
Aliases: wi
Required: False
Position: Named
Default value: False
Accept pipeline input: False
Accept wildcard characters: False
```
### CommonParameters
This cmdlet supports the common parameters: -Debug, -ErrorAction, -ErrorVariable, -InformationAction, -InformationVariable, -OutVariable, -OutBuffer, -PipelineVariable, -Verbose, -WarningAction, and -WarningVariable. For more information, see about_CommonParameters (http://go.microsoft.com/fwlink/?LinkID=113216).
## INPUTS
### System.String
The Name parameter is the only parameter that accepts pipeline input; it takes a single template, specified by name as a string, each time.
## OUTPUTS
### None
## NOTES
* To perform this procedure, you must be a member of the Domain Admins group or the Enterprise Admins group in Active Directory Domain Services (AD DS), or you must have been delegated the appropriate authority. As a security best practice, consider using Run as to perform this procedure.
## RELATED LINKS
[Get-CATemplate](./Get-CATemplate.md)
[Remove-CATemplate](./Remove-CATemplate.md)
| {
"pile_set_name": "Github"
} |
using FluentMigrator.Runner.Generators.Generic;
namespace FluentMigrator.Runner.Generators.SQLite
{
// ReSharper disable once InconsistentNaming
public class SQLiteQuoter : GenericQuoter
{
    /// <summary>Renders system-method tokens as their SQLite SQL text.</summary>
    public override string FormatSystemMethods(SystemMethods value)
    {
        if (value == SystemMethods.CurrentUTCDateTime)
        {
            return "CURRENT_TIMESTAMP";
        }

        if (value == SystemMethods.CurrentDateTime)
        {
            return "(datetime('now','localtime'))";
        }

        return base.FormatSystemMethods(value);
    }

    /// <summary>SQLite has no schema names, so the quoted form is always empty.</summary>
    public override string QuoteSchemaName(string schemaName)
    {
        return string.Empty;
    }

    /// <summary>Formats a byte array as a SQLite blob literal, e.g. X'0aff'.</summary>
    protected override string FormatByteArray(byte[] value)
    {
        var literal = new System.Text.StringBuilder((value.Length * 2) + 3);
        literal.Append("X'");
        foreach (var b in value)
        {
            // lowercase two-digit hex per byte
            literal.Append(b.ToString("x2"));
        }
        literal.Append("'");
        return literal.ToString();
    }
}
}
| {
"pile_set_name": "Github"
} |
//*LB*
// Copyright (c) 2010, University of Bonn, Institute for Computer Science VI
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Bonn
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
/**
* @file image.hpp
* @brief general base class for images
* @ingroup data_structures
* @author Hannes Schulz
* @date 2011-05-19
*/
#ifndef __IMAGE_HPP__
#define __IMAGE_HPP__
#include <cuv/basics/tensor.hpp>
namespace cuv
{
/**
* a wrapper around a tensor to provide an interleaved (e.g. RGBRGBRGB...) image
*
* the internal tensor is a _strided_ tensor.
* @ingroup data_structures
*/
template<int NumChannels, class __value_type, class __memory_space_type>
class
interleaved_image{
public:
/// the type of the wrapped tensor: Row-major and strided!
typedef cuv::tensor<__value_type,__memory_space_type,row_major> tensor_type;
/// the index type
typedef typename tensor_type::index_type index_type;
/// the type of returned references
typedef typename tensor_type::reference_type reference_type;
private:
tensor_type m_tens;
unsigned int m_height;
unsigned int m_width;
public:
static const int num_channels = NumChannels;
/**
* construct an interleaved image based on dimensions
*
* @param h height
* @param w width
* @param c number of channels
*/
interleaved_image(unsigned int h, unsigned int w, unsigned int c=1)
: m_tens(extents[h][w*NumChannels]),
m_height(h),
m_width(w)
{
}
/**
* copy-construct an interleaved_image
* @param o source image
*/
interleaved_image(const interleaved_image& o)
: m_tens(o.tens()),
m_height(o.height()),
m_width(o.width())
{
}
/// @return the width of the image
inline index_type width()const{ return m_width; }
/// @return the height of the image
inline index_type height()const{ return m_height; }
/// @return the number of channels
inline index_type channels()const{ return NumChannels; }
/// @return the wrapped tensor
inline const tensor_type& tens()const{ return m_tens; }
/**
* element access
*
* @param i index along height
* @param j index along width
* @param c index of channel
*/
reference_type
operator()(index_type i, index_type j, index_type c=0){
return m_tens(i,j*NumChannels+c);
}
/**
* const element access
*
* @param i index along height
* @param j index along width
* @param c index of channel
*/
const reference_type
operator()(index_type i, index_type j, index_type c=0)const{
return m_tens(i,j*NumChannels+c);
}
/**
* assignment operator
*
* @param o source image
*/
interleaved_image&
operator=(const interleaved_image& o){
m_width = o.width();
m_height = o.height();
m_tens = o.tens();
return *this;
}
};
}
#endif /* __IMAGE_HPP__ */
| {
"pile_set_name": "Github"
} |
// Test -fsanitize-memory-use-after-dtor
// RUN: %clang_cc1 -O0 -fsanitize=memory -fsanitize-memory-use-after-dtor -disable-llvm-passes -std=c++11 -triple=x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -O1 -fsanitize=memory -fsanitize-memory-use-after-dtor -disable-llvm-passes -std=c++11 -triple=x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s
// TODO Success pending on resolution of issue:
// https://github.com/google/sanitizers/issues/596
// XFAIL: *
// A trivially-destructible struct (no user-declared destructor); the FileCheck
// pattern below still expects the sanitizer's dtor callback to be emitted for
// the global instance.
struct Trivial {
  int a;
  int b;
};
// global whose destruction the instrumentation should cover
Trivial t;
// CHECK: call void @__sanitizer_dtor_callback
| {
"pile_set_name": "Github"
} |
/* μlogger
*
* Copyright(C) 2017 Bartek Fabiszewski (www.fabiszewski.net)
*
* This is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
html {
height: 100%;
}
body {
height: 100%;
margin: 0;
padding: 0;
background-color: #666;
}
a {
cursor: pointer;
text-decoration: none;
color: #bce;
}
:link, :visited {
color: #bce;
}
select {
font-weight: normal;
width: 150px;
padding-top: 0.2em;
}
#container {
display: flex;
height: 100%;
}
#main {
flex-grow: 1;
order: 1;
height: 100%;
}
#map-canvas {
height: 100%;
}
#menu {
font-family: "Open Sans", Verdana, sans-serif;
font-size: 0.7em;
font-weight: bold;
float: right;
overflow-x: hidden;
overflow-y: auto;
order: 2;
width: 165px;
height: 100%;
color: white;
background-color: #666;
-moz-osx-font-smoothing: grayscale;
-webkit-font-smoothing: antialiased;
}
#menu-content {
padding: 10px 0 3em 10px;
}
#footer {
line-height: 3em;
position: fixed;
bottom: 0;
width: 165px;
padding-left: 10px;
color: lightgray;
background-color: rgba(102, 102, 102, 0.9);
}
#menu-button {
font-size: 28px;
font-weight: normal;
line-height: 28px;
position: absolute;
z-index: 1900;
top: 5px;
right: 0;
width: 30px;
height: 35px;
cursor: pointer;
text-align: center;
border-width: 1px 0 1px 1px;
border-style: solid;
border-color: #bce;
border-radius: 11px 0 0 11px;
background-color: #666;
}
#menu-button a {
color: white;
}
#menu-button a::after {
content: "»";
}
#menu.menu-hidden {
width: 0;
}
#menu.menu-hidden #menu-button {
font-weight: normal;
border-color: white;
background-color: rgba(0, 60, 136, 0.3);
}
#menu.menu-hidden #menu-button a::after {
content: "«";
}
#menu input,
#login input {
width: 150px;
text-align: center;
border: 1px solid black;
}
#menu input[type="submit"],
#login input[type="submit"] {
color: white;
border: 1px solid white;
background-color: black;
cursor: pointer;
}
#menu input[type="checkbox"] {
width: auto;
}
.menu-link {
display: block;
margin-top: 0.2em;
}
label[for=user] {
display: block;
padding-top: 1em;
}
.section {
display: block;
padding-bottom: 10px;
}
.section:first-child {
padding-top: 1em;
}
#input-file {
display: none;
}
#summary div {
padding-top: 0.3em;
}
#summary div img {
margin-bottom: -2px;
}
#login {
font-family: "Open Sans", Verdana, sans-serif;
font-size: 0.8em;
position: relative;
top: 10%;
width: 30%;
min-width: 200px;
margin: auto;
padding: 30px;
text-align: center;
color: white;
background-color: #444;
}
#title {
font-size: 1.3em;
padding-top: 0.6em;
padding-bottom: 0.5em;
}
#subtitle {
padding-bottom: 2em;
}
#error {
padding-top: 1.2em;
color: yellow;
}
#popup {
font-family: "Open Sans", Verdana, sans-serif;
max-width: 25em;
background-color: #666;
}
#pheader {
font-size: 0.9rem;
float: left;
padding-bottom: 0.5rem;
color: #bce;
}
#pheader div {
float: left;
padding-right: 2em;
}
#pheader div img {
background-image: radial-gradient(circle closest-side, #bfbfbc, #666);
}
#pbody {
font-size: 0.8rem;
line-height: 1.3rem;
clear: both;
padding-top: 0.2rem;
white-space: nowrap;
color: #e6e2e2;
border-top: 1px solid #bce;
}
#pcomments {
clear: both;
padding: 1em;
text-align: center;
white-space: normal;
color: #e6e6e6;
border-radius: 10px;
background-color: #777676;
}
#pimage {
text-align: center;
}
#pimage img {
max-width: 100%;
max-height: 25em;
cursor: pointer;
border-radius: 10px;
}
#pimage img:hover {
opacity: 0.7;
}
#pleft, #pright {
display: inline-block;
padding-top: 5px;
padding-right: 20px;
}
#pleft img {
background-image: radial-gradient(circle closest-side, #bfbfbc, #666);
}
#pbody .smaller {
font-size: 0.9em;
color: #cacaca;
}
#pfooter {
font-size: 0.6rem;
padding-top: 20px;
color: #f0f8ff;
}
#pfooter div:first-child {
width: 40%;
float: left;
}
#pfooter div:last-child {
width: 40%;
float: right;
text-align: right;
}
#bottom {
position: relative;
z-index: 10000;
display: none;
}
#chart {
font-family: "Open Sans", Verdana, sans-serif;
position: absolute;
right: 0;
bottom: -15px;
left: 0;
height: 200px;
padding: 0 10px;
opacity: 0.8;
background-color: white;
}
#chart-close {
position: absolute;
z-index: 10001;
right: 4px;
bottom: 166px;
cursor: pointer;
}
.mi {
font-style: italic;
padding-right: 0.1em;
color: white;
}
#modal {
font-family: "Open Sans", Verdana, sans-serif;
position: fixed;
z-index: 10010;
top: 0;
left: 0;
display: block;
overflow: auto;
width: 100%;
height: 100%;
background-color: black; /* opaque fallback for browsers without rgba() support */
background-color: rgba(0, 0, 0, 0.4);
}
#modal-header {
position: absolute;
top: -10px;
right: 10px;
margin: 0 auto;
text-align: right;
}
#modal-header button {
border: none;
background-color: rgba(0, 0, 0, 0);
}
#modal-body {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%) !important;
font-size: 0.9em;
min-width: 300px;
margin: 0 auto 15% auto;
padding: 1em;
color: white;
border: 1px solid #888;
-webkit-border-radius: 10px;
border-radius: 10px;
background-color: rgba(102, 102, 102, 0.9);
-moz-osx-font-smoothing: grayscale;
-webkit-font-smoothing: antialiased;
}
#modal-body .buttons {
padding-top: 1em;
}
#modal input[type=text],
#modal input[type=color],
#modal input[type=number],
#modal input[type=password] {
display: inline-block;
box-sizing: border-box;
width: 100%;
margin: 0.8em 0;
padding: 0.4em;
border: 1px solid #ccc;
-webkit-border-radius: 5px;
border-radius: 5px;
}
#modal.image {
overflow: hidden;
padding-top: 0;
background-color: rgba(45, 45, 45, 0.95);
}
#modal.image #modal-body img {
height: auto;
max-width: 90vw;
max-height: 87vh;
}
#modal.image #modal-body {
text-align: center;
background-color: rgb(45, 45, 45);
}
button {
font-weight: bold;
margin-right: 5px;
cursor: pointer;
color: white;
border: 1px solid white;
-webkit-border-radius: 5px;
border-radius: 5px;
background-color: #434343;
}
button > * {
pointer-events: none;
}
#cancel {
margin-top: 0.5em;
}
.red-button {
float: right;
padding: 0.1em 0.4em;
color: white;
-webkit-border-radius: 10px;
border-radius: 10px;
background-color: red;
}
#user-menu {
position: absolute;
display: block;
width: 130px;
padding: 1em;
border: 1px solid #888;
background-color: gray;
}
#user-menu.menu-hidden, a.menu-hidden {
display: none;
}
#user-menu a {
display: block;
padding-top: 0.5em;
padding-bottom: 0.5em;
}
.icon {
height: 1.4em;
margin-right: 4px;
vertical-align: text-top;
}
.menu-title {
text-decoration: underline;
}
#configForm label {
display: block;
}
#configForm label b {
display: inline-block;
text-align: right;
width: 250px;
margin-right: 10px;
font-size: small;
padding-top: 5px;
}
#configForm input[type=text],
#configForm input[type=number],
#configForm input[type=color],
#configForm select {
width: 150px;
margin: 3px 0;
padding: 2px 4px;
box-sizing: border-box;
}
#configForm input[type=checkbox] {
margin: 0;
}
#configForm select {
padding: 2px 0;
}
#configForm input[type=color] {
vertical-align: middle;
padding: 0;
}
#configForm img {
height: 13px;
vertical-align: middle;
margin: 0 5px;
}
.hidden {
display: none;
}
/* alert */
/* Fixed, horizontally centered notification banner; shown/hidden by
   toggling opacity through the .in/.out classes below. */
.alert {
  position: fixed;
  top: 0;
  left: 50%;
  width: 300px;
  background: #666;
  color: white;
  font-family: "Open Sans", Verdana, sans-serif;
  font-size: 0.8em;
  line-height: 20px;
  text-align: center;
  -moz-osx-font-smoothing: grayscale;
  -webkit-font-smoothing: antialiased;
  /* Negative left margin of half the width keeps the box centered. */
  margin: 1em 0 1em -150px;
  padding: 6px 20px;
  border-radius: 5px;
  border-top: 1px solid #555;
  box-shadow: 10px 10px 10px -8px rgba(0, 0, 0, 0.3);
  z-index: 100000;
  opacity: 0;
  transition: all 1s;
}
/* Error variant: red background. */
.alert.error {
  background: #d95b5b;
  border-top: 1px solid #d05858;
}
/* Fade in/out states driven by the transition declared on .alert. */
.alert.in {
  opacity: 1;
}
.alert.out {
  opacity: 0;
}
/* Dismiss button pinned to the banner's right edge. */
.alert button {
  position: absolute;
  top: -1px;
  right: 0;
  border: none;
  margin: 0;
  height: 100%;
  background: none;
  font-weight: normal;
  font-size: 15px;
}
/* Spinner variant: three pulsing dots built from one span plus its
   ::before/::after pseudo-elements, staggered via animation-delay. */
.alert.spinner {
  background-color: transparent;
  border: none;
  box-shadow: none;
}
/* Center dot (delay 0.5s). */
.alert.spinner > span {
  position: relative;
  display: block;
  width: 10px;
  height: 10px;
  border-radius: 5px;
  background-color: #9880ff;
  color: #9880ff;
  animation: spinner-dot 1s infinite linear alternate;
  animation-delay: 0.5s;
  /* Force a compositor layer to keep the animation smooth. */
  transform: translateZ(0);
  -webkit-transform: translateZ(0);
  -ms-transform: translateZ(0);
  will-change: transform, opacity;
}
/* Right dot (delay 1s). */
.alert.spinner > span::after {
  left: 15px;
  width: 10px;
  height: 10px;
  border-radius: 5px;
  background-color: #9880ff;
  color: #9880ff;
  animation: spinner-dot 1s infinite alternate;
  animation-delay: 1s;
}
.alert.spinner > span::before, .alert.spinner > span::after {
  content: '';
  display: inline-block;
  position: absolute;
  top: 0;
}
/* Left dot (no delay). */
.alert.spinner > span::before {
  left: -15px;
  width: 10px;
  height: 10px;
  border-radius: 5px;
  background-color: #9880ff;
  color: #9880ff;
  animation: spinner-dot 1s infinite alternate;
  animation-delay: 0s;
}
/* Dot pulse: fade between the accent color and a pale tint. */
@keyframes spinner-dot {
  0% {
    background-color: #9880ff;
  }
  50%, 100% {
    background-color: #ebe6ff;
  }
}
/* chart */
/* Chartist data points enlarge on hover/highlight to ease picking. */
.ct-point {
  transition: 0.3s;
  stroke-width: 5px !important;
}
.ct-point:hover {
  cursor: pointer;
  stroke-width: 10px !important;
}
/* Programmatic highlight (same size as hover). */
.ct-point-hilight {
  stroke-width: 10px !important;
}
/* Selected point keeps the enlarged size and gets a yellow stroke. */
.ct-point-selected {
  stroke-width: 10px !important;
  stroke: #f4c63d !important;
}
.ct-line {
  stroke-width: 2px !important;
}
.ct-axis-title {
  font-size: 0.8em;
}
/* openlayers popup */
/* Bubble anchored above a map feature; the arrow below it is drawn with
   the ::before/::after border-triangle technique. */
.ol-popup {
  position: absolute;
  bottom: 12px;
  left: -50px;
  min-width: 280px;
  padding: 15px;
  border: 1px solid #ccc;
  border-radius: 10px;
  background-color: #666;
  -webkit-filter: drop-shadow(0 1px 4px rgba(0, 0, 0, 0.2));
  filter: drop-shadow(0 1px 4px rgba(0, 0, 0, 0.2));
}
/* Shared setup for both arrow triangles. */
.ol-popup::after, .ol-popup::before {
  position: absolute;
  top: 100%;
  width: 0;
  height: 0;
  content: " ";
  pointer-events: none;
  border: solid transparent;
}
/* Inner triangle, colored like the popup background. */
.ol-popup::after {
  left: 48px;
  margin-left: -10px;
  border-width: 10px;
  border-top-color: #666;
}
/* Outer triangle, 1px larger, acts as the arrow's border. */
.ol-popup::before {
  left: 48px;
  margin-left: -11px;
  border-width: 11px;
  border-top-color: #ccc;
}
/* Close button in the popup's top-right corner, using the app's icon. */
.ol-popup-closer {
  position: absolute;
  top: -5px;
  right: -10px;
  width: 30px;
  height: 30px;
  background-image: url(../../images/close.svg) !important;
  background-repeat: no-repeat !important;
}
.ol-overlay-container {
  background-color: #666;
}
/* Google Maps InfoWindow */
/* Restyle the InfoWindow chrome to match the dark popup theme above. */
.gm-style .gm-style-iw-c {
  background-color: #666 !important;
  overflow: visible !important;
}
/* The InfoWindow arrow is a rotated square; repaint it with a gradient. */
.gm-style .gm-style-iw-t::after {
  background: linear-gradient(45deg, rgb(102, 102, 102) 50%, rgba(255, 255, 255, 0) 51%, rgba(255, 255, 255, 0) 100%) !important;
}
/* Swap Google's close icon for the app's own SVG... */
.gm-style-iw button {
  background-image: url(../../images/close.svg) !important;
  background-repeat: no-repeat !important;
}
/* ...while hiding the stock <img> icon it would otherwise show. */
.gm-style-iw button img {
  visibility: hidden;
}
/* Darken the scrollable body's scrollbar track to match the theme. */
.gm-style .gm-style-iw-d::-webkit-scrollbar-track,
.gm-style .gm-style-iw-d::-webkit-scrollbar-track-piece {
  background: #666 !important;
}
/* Layer switcher panel; hidden until toggled by the switcher button. */
#switcher {
  position: absolute;
  bottom: 12px;
  left: 10px;
  display: none;
  min-width: 200px;
}
/* Styled like a default OpenLayers control (translucent blue). */
.ol-layerswitcher {
  font-family: sans-serif;
  font-size: 0.9em;
  font-weight: bold;
  margin: 1px;
  padding: 0.5em;
  color: #fff;
  border: none;
  border-radius: 2px;
  background-color: rgba(0, 60, 136, 0.5);
}
.ol-layerswitcher:hover {
  background-color: rgba(0, 60, 136, 0.7);
}
/* One clickable row per layer. */
.ol-layerswitcher label {
  display: block;
  clear: both;
  margin: 0.5em 0;
  cursor: pointer;
}
.ol-layerswitcher label:hover {
  color: #c8dcf2;
}
.ol-layerswitcher input {
  margin-right: 1em;
}
/* Extra gap before the first data-layer entry. */
label.ol-datalayer {
  margin-top: 1.5em;
}
/* Subsequent data-layer entries fall back to normal spacing. */
.ol-datalayer ~ .ol-datalayer {
  margin-top: 0.5em;
}
.ol-switcher-button {
  top: 6.6em;
  left: 0.5em;
}
/* Larger offset on touch devices, matching OpenLayers' bigger controls. */
.ol-touch .ol-switcher-button {
  top: 10em;
}
| {
"pile_set_name": "Github"
} |
# Should be last file loaded
# Briefly flashes a DOM node red, then animates back to its original
# background color. Used below to visualize template re-renders.
# Signature matches jQuery's .each callback: (index, element).
pulseNode = (i, node) ->
  # Text/comment nodes have no style object; nothing to pulse.
  return unless node.style
  $node = $(node)
  # Remember the pre-pulse inline CSS and background color so repeated
  # pulses restore the true originals, not an in-flight animation state.
  prePulseCss = $node.data('prePulseCss') ? node.style.cssText
  prePulseBackgroundColor = $node.data('prePulseBackgroundColor') ? $node.css('backgroundColor')
  $node.data(
    'prePulseCss': prePulseCss
    'prePulseBackgroundColor': prePulseBackgroundColor
  ).css('backgroundColor', 'rgba(255,0,0,0.5)').stop('pulseQueue', true).animate(
    backgroundColor: prePulseBackgroundColor
  ,
    duration: 'slow'
    # A dedicated queue so the pulse never interferes with other animations.
    queue: 'pulseQueue'
    done: (animation, jumpedToEnd) ->
      # Restore the exact original inline style once the pulse completes.
      node.style.cssText = prePulseCss
  ).dequeue 'pulseQueue'
# When debug rendering is enabled in Meteor settings, wrap every template's
# rendered callback to count its renders and pulse the rendered DOM nodes.
if Meteor.settings?.public?.debug?.rendering
  _.each Template, (template, name) ->
    oldRendered = template.rendered
    counter = 0
    template.rendered = (args...) ->
      FlashMessage.debug name, "render count: #{ ++counter }"
      # Preserve any previously defined rendered callback.
      oldRendered.apply @, args if oldRendered
      # Highlight every top-level node of this template instance.
      $(@findAll '> *').each pulseNode
| {
"pile_set_name": "Github"
} |
/*
* Author: Garrett Barboza <[email protected]>
*
* Copyright (c) 2014 Kaprica Security, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#ifndef MALLOC_COMMON_H
#define MALLOC_COMMON_H

#include <stddef.h>  /* size_t -- <stdint.h> alone does not guarantee it */
#include <stdint.h>

/* Number of segregated free lists (size classes). */
#define NUM_FREE_LISTS 32
/* Bytes of block metadata preceding the user payload. */
#define HEADER_PADDING 24
/* Granularity (in bytes) at which the heap is grown. */
#define NEW_CHUNK_SIZE 262144
/* Alignment (in bytes) of user payloads. */
#define ALIGNMENT 8

/* Heads of the segregated free lists, one per size class. */
extern struct blk_t *free_lists[NUM_FREE_LISTS];
/* Upper size bound for each size class. */
extern size_t size_class_limits[NUM_FREE_LISTS];

/*
 * Heap block header. Blocks are linked both in address order (next/prev)
 * and, while free, within their size-class free list (fsucc/fpred).
 */
struct blk_t {
    size_t size;          /* block size in bytes */
    unsigned int free;    /* nonzero when the block is on a free list */
    struct blk_t *fsucc;  /* successor in the free list */
    struct blk_t *fpred;  /* predecessor in the free list */
    struct blk_t *next;   /* next block in address order */
    struct blk_t *prev;   /* previous block in address order */
};

/* Merge a freed block with adjacent free neighbors. */
void coalesce(struct blk_t *);
/* Map a request size to its size-class index. */
int get_size_class(size_t size);
/* Push a block onto its size-class free list. */
void insert_into_flist(struct blk_t *blk);
/* Unlink a block from its size-class free list. */
void remove_from_flist(struct blk_t *blk);

#endif /* MALLOC_COMMON_H */
| {
"pile_set_name": "Github"
} |
// Copyright 2008 Google Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Type and function utilities for implementing parameterized tests.
// GOOGLETEST_CM0001 DO NOT DELETE
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
#include <ctype.h>
#include <iterator>
#include <set>
#include <utility>
#include <vector>
#include "gtest/internal/gtest-internal.h"
#include "gtest/internal/gtest-linked_ptr.h"
#include "gtest/internal/gtest-port.h"
#include "gtest/gtest-printers.h"
namespace testing {
// Input to a parameterized test name generator, describing a test parameter.
// Consists of the parameter value and the integer parameter index.
template <class ParamType>
struct TestParamInfo {
  TestParamInfo(const ParamType& a_param, size_t an_index) :
      param(a_param),
      index(an_index) {}
  ParamType param;  // The parameter value itself.
  size_t index;     // Zero-based index of the value within its generator.
};
// A builtin parameterized test name generator which returns the result of
// testing::PrintToString.
struct PrintToStringParamName {
  template <class ParamType>
  std::string operator()(const TestParamInfo<ParamType>& info) const {
    // Delegates naming entirely to the value's printable representation.
    return PrintToString(info.param);
  }
};
namespace internal {
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Outputs a message explaining invalid registration of different
// fixture class for the same test case. This may happen when
// TEST_P macro is used to define two tests with the same name
// but in different namespaces.
GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
CodeLocation code_location);
template <typename> class ParamGeneratorInterface;
template <typename> class ParamGenerator;
// Interface for iterating over elements provided by an implementation
// of ParamGeneratorInterface<T>.
template <typename T>
class ParamIteratorInterface {
 public:
  virtual ~ParamIteratorInterface() {}
  // A pointer to the base generator instance.
  // Used only for the purposes of iterator comparison
  // to make sure that two iterators belong to the same generator.
  virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
  // Advances iterator to point to the next element
  // provided by the generator. The caller is responsible
  // for not calling Advance() on an iterator equal to
  // BaseGenerator()->End().
  virtual void Advance() = 0;
  // Clones the iterator object. Used for implementing copy semantics
  // of ParamIterator<T>. The returned object is owned by the caller.
  virtual ParamIteratorInterface* Clone() const = 0;
  // Dereferences the current iterator and provides (read-only) access
  // to the pointed value. The returned pointer is borrowed; it stays
  // owned by the iterator. It is the caller's responsibility not to call
  // Current() on an iterator equal to BaseGenerator()->End().
  // Used for implementing ParamGenerator<T>::operator*().
  virtual const T* Current() const = 0;
  // Determines whether the given iterator and other point to the same
  // element in the sequence generated by the generator.
  // Used for implementing ParamGenerator<T>::operator==().
  virtual bool Equals(const ParamIteratorInterface& other) const = 0;
};
// Class iterating over elements provided by an implementation of
// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
// and implements the const forward iterator concept.
template <typename T>
class ParamIterator {
 public:
  typedef T value_type;
  typedef const T& reference;
  typedef ptrdiff_t difference_type;

  // ParamIterator assumes ownership of the impl_ pointer.
  ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
  ParamIterator& operator=(const ParamIterator& other) {
    if (this != &other)
      impl_.reset(other.impl_->Clone());
    return *this;
  }

  const T& operator*() const { return *impl_->Current(); }
  const T* operator->() const { return impl_->Current(); }
  // Prefix version of operator++.
  ParamIterator& operator++() {
    impl_->Advance();
    return *this;
  }
  // Postfix version of operator++.
  ParamIterator operator++(int /*unused*/) {
    // Clone first so the returned iterator still refers to the old element.
    ParamIteratorInterface<T>* clone = impl_->Clone();
    impl_->Advance();
    return ParamIterator(clone);
  }
  bool operator==(const ParamIterator& other) const {
    // Identical impl pointers short-circuit the virtual comparison.
    return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
  }
  bool operator!=(const ParamIterator& other) const {
    return !(*this == other);
  }

 private:
  friend class ParamGenerator<T>;
  // Private: only ParamGenerator<T> constructs iterators from raw impls.
  explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
  scoped_ptr<ParamIteratorInterface<T> > impl_;
};
// ParamGeneratorInterface<T> is the binary interface to access generators
// defined in other translation units.
template <typename T>
class ParamGeneratorInterface {
 public:
  typedef T ParamType;

  virtual ~ParamGeneratorInterface() {}

  // Generator interface definition.
  // Both methods return a newly allocated iterator owned by the caller.
  virtual ParamIteratorInterface<T>* Begin() const = 0;
  virtual ParamIteratorInterface<T>* End() const = 0;
};
// Wraps ParamGeneratorInterface<T> and provides general generator syntax
// compatible with the STL Container concept.
// This class implements copy initialization semantics and the contained
// ParamGeneratorInterface<T> instance is shared among all copies
// of the original object. This is possible because that instance is immutable.
template<typename T>
class ParamGenerator {
 public:
  typedef ParamIterator<T> iterator;

  explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
  ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
  ParamGenerator& operator=(const ParamGenerator& other) {
    impl_ = other.impl_;
    return *this;
  }

  iterator begin() const { return iterator(impl_->Begin()); }
  iterator end() const { return iterator(impl_->End()); }

 private:
  // Reference-counted so all copies of this generator share one impl.
  linked_ptr<const ParamGeneratorInterface<T> > impl_;
};
// Generates values from a range of two comparable values. Can be used to
// generate sequences of user-defined types that implement operator+() and
// operator<().
// This class is used in the Range() function.
template <typename T, typename IncrementT>
class RangeGenerator : public ParamGeneratorInterface<T> {
 public:
  // NOTE(review): step must move begin toward end; otherwise the loop in
  // CalculateEndIndex() below never terminates.
  RangeGenerator(T begin, T end, IncrementT step)
      : begin_(begin), end_(end),
        step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
  virtual ~RangeGenerator() {}

  virtual ParamIteratorInterface<T>* Begin() const {
    return new Iterator(this, begin_, 0, step_);
  }
  virtual ParamIteratorInterface<T>* End() const {
    return new Iterator(this, end_, end_index_, step_);
  }

 private:
  class Iterator : public ParamIteratorInterface<T> {
   public:
    Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
             IncrementT step)
        : base_(base), value_(value), index_(index), step_(step) {}
    virtual ~Iterator() {}

    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
      return base_;
    }
    virtual void Advance() {
      value_ = static_cast<T>(value_ + step_);
      index_++;
    }
    virtual ParamIteratorInterface<T>* Clone() const {
      return new Iterator(*this);
    }
    virtual const T* Current() const { return &value_; }
    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
      // Having the same base generator guarantees that the other
      // iterator is of the same type and we can downcast.
      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
          << "The program attempted to compare iterators "
          << "from different generators." << std::endl;
      // Iterators are compared by index rather than by value, so the
      // past-the-end iterator built by End() compares correctly.
      const int other_index =
          CheckedDowncastToActualType<const Iterator>(&other)->index_;
      return index_ == other_index;
    }

   private:
    Iterator(const Iterator& other)
        : ParamIteratorInterface<T>(),
          base_(other.base_), value_(other.value_), index_(other.index_),
          step_(other.step_) {}

    // No implementation - assignment is unsupported.
    void operator=(const Iterator& other);

    const ParamGeneratorInterface<T>* const base_;
    T value_;     // Current element of the range.
    int index_;   // 0-based position of value_ within the range.
    const IncrementT step_;
  };  // class RangeGenerator::Iterator

  // Counts how many increments of step it takes to walk from begin to
  // end; this index labels the past-the-end iterator.
  static int CalculateEndIndex(const T& begin,
                               const T& end,
                               const IncrementT& step) {
    int end_index = 0;
    for (T i = begin; i < end; i = static_cast<T>(i + step))
      end_index++;
    return end_index;
  }

  // No implementation - assignment is unsupported.
  void operator=(const RangeGenerator& other);

  const T begin_;
  const T end_;
  const IncrementT step_;
  // The index for the end() iterator. All the elements in the generated
  // sequence are indexed (0-based) to aid iterator comparison.
  const int end_index_;
};  // class RangeGenerator
// Generates values from a pair of STL-style iterators. Used in the
// ValuesIn() function. The elements are copied from the source range
// since the source can be located on the stack, and the generator
// is likely to persist beyond that stack frame.
template <typename T>
class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
 public:
  template <typename ForwardIterator>
  ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
      : container_(begin, end) {}
  virtual ~ValuesInIteratorRangeGenerator() {}

  virtual ParamIteratorInterface<T>* Begin() const {
    return new Iterator(this, container_.begin());
  }
  virtual ParamIteratorInterface<T>* End() const {
    return new Iterator(this, container_.end());
  }

 private:
  typedef typename ::std::vector<T> ContainerType;

  class Iterator : public ParamIteratorInterface<T> {
   public:
    Iterator(const ParamGeneratorInterface<T>* base,
             typename ContainerType::const_iterator iterator)
        : base_(base), iterator_(iterator) {}
    virtual ~Iterator() {}

    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
      return base_;
    }
    virtual void Advance() {
      ++iterator_;
      // Drop the cached copy; Current() re-creates it lazily.
      value_.reset();
    }
    virtual ParamIteratorInterface<T>* Clone() const {
      return new Iterator(*this);
    }
    // We need to use cached value referenced by iterator_ because *iterator_
    // can return a temporary object (and of type other than T), so just
    // having "return &*iterator_;" doesn't work.
    // value_ is updated here and not in Advance() because Advance()
    // can advance iterator_ beyond the end of the range, and we cannot
    // detect that fact. The client code, on the other hand, is
    // responsible for not calling Current() on an out-of-range iterator.
    virtual const T* Current() const {
      if (value_.get() == NULL)
        value_.reset(new T(*iterator_));
      return value_.get();
    }
    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
      // Having the same base generator guarantees that the other
      // iterator is of the same type and we can downcast.
      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
          << "The program attempted to compare iterators "
          << "from different generators." << std::endl;
      return iterator_ ==
          CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
    }

   private:
    Iterator(const Iterator& other)
        // The explicit constructor call suppresses a false warning
        // emitted by gcc when supplied with the -Wextra option.
        : ParamIteratorInterface<T>(),
          base_(other.base_),
          iterator_(other.iterator_) {}

    const ParamGeneratorInterface<T>* const base_;
    typename ContainerType::const_iterator iterator_;
    // A cached value of *iterator_. We keep it here to allow access by
    // pointer in the wrapping iterator's operator->().
    // value_ needs to be mutable to be accessed in Current().
    // Use of scoped_ptr helps manage cached value's lifetime,
    // which is bound by the lifespan of the iterator itself.
    mutable scoped_ptr<const T> value_;
  };  // class ValuesInIteratorRangeGenerator::Iterator

  // No implementation - assignment is unsupported.
  void operator=(const ValuesInIteratorRangeGenerator& other);

  const ContainerType container_;
};  // class ValuesInIteratorRangeGenerator
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Default parameterized test name generator, returns a string containing the
// integer test parameter index.
template <class ParamType>
std::string DefaultParamName(const TestParamInfo<ParamType>& info) {
  // The parameter value itself is ignored; only the index names the test.
  Message name_stream;
  name_stream << info.index;
  return name_stream.GetString();
}
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Parameterized test name overload helpers, which help the
// INSTANTIATE_TEST_CASE_P macro choose between the default parameterized
// test name generator and user param name generator.
template <class ParamType, class ParamNameGenFunctor>
ParamNameGenFunctor GetParamNameGen(ParamNameGenFunctor func) {
  // A user-supplied naming functor is used verbatim.
  return func;
}

// Signature required of a parameterized-test name generator function.
template <class ParamType>
struct ParamNameGenFunc {
  typedef std::string Type(const TestParamInfo<ParamType>&);
};

// No-argument overload: falls back to the index-based DefaultParamName.
template <class ParamType>
typename ParamNameGenFunc<ParamType>::Type *GetParamNameGen() {
  return DefaultParamName;
}
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Stores a parameter value and later creates tests parameterized with that
// value.
template <class TestClass>
class ParameterizedTestFactory : public TestFactoryBase {
 public:
  typedef typename TestClass::ParamType ParamType;
  explicit ParameterizedTestFactory(ParamType parameter) :
      parameter_(parameter) {}
  virtual Test* CreateTest() {
    // Publish the parameter through the fixture's static slot, then
    // instantiate the test, which reads it during construction.
    TestClass::SetParam(&parameter_);
    return new TestClass();
  }

 private:
  const ParamType parameter_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
};
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// TestMetaFactoryBase is a base class for meta-factories that create
// test factories for passing into MakeAndRegisterTestInfo function.
template <class ParamType>
class TestMetaFactoryBase {
 public:
  virtual ~TestMetaFactoryBase() {}

  // Returns a new test factory, owned by the caller, bound to the given
  // parameter value.
  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
};
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// TestMetaFactory creates test factories for passing into
// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
// ownership of test factory pointer, same factory object cannot be passed
// into that method twice. But ParameterizedTestCaseInfo is going to call
// it for each Test/Parameter value combination. Thus it needs meta factory
// creator class.
template <class TestCase>
class TestMetaFactory
    : public TestMetaFactoryBase<typename TestCase::ParamType> {
 public:
  typedef typename TestCase::ParamType ParamType;

  TestMetaFactory() {}

  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
    // A fresh factory per call, so each registration owns its own copy.
    return new ParameterizedTestFactory<TestCase>(parameter);
  }

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
};
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// ParameterizedTestCaseInfoBase is a generic interface
// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
// accumulates test information provided by TEST_P macro invocations
// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
// and uses that information to register all resulting test instances
// in RegisterTests method. The ParameterizeTestCaseRegistry class holds
// a collection of pointers to the ParameterizedTestCaseInfo objects
// and calls RegisterTests() on each of them when asked.
class ParameterizedTestCaseInfoBase {
 public:
  virtual ~ParameterizedTestCaseInfoBase() {}

  // Base part of test case name for display purposes.
  virtual const std::string& GetTestCaseName() const = 0;
  // Test case id to verify identity.
  virtual TypeId GetTestCaseTypeId() const = 0;
  // UnitTest class invokes this method to register tests in this
  // test case right before running them in RUN_ALL_TESTS macro.
  // This method should not be called more than once on any single
  // instance of a ParameterizedTestCaseInfoBase derived class.
  virtual void RegisterTests() = 0;

 protected:
  // Only derived classes may construct this base.
  ParameterizedTestCaseInfoBase() {}

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
};
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
// macro invocations for a particular test case and generators
// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
// test case. It registers tests with all values generated by all
// generators when asked.
template <class TestCase>
class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
 public:
  // ParamType and GeneratorCreationFunc are private types but are required
  // for declarations of public methods AddTestPattern() and
  // AddTestCaseInstantiation().
  typedef typename TestCase::ParamType ParamType;
  // A function that returns an instance of appropriate generator type.
  typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
  typedef typename ParamNameGenFunc<ParamType>::Type ParamNameGeneratorFunc;

  explicit ParameterizedTestCaseInfo(
      const char* name, CodeLocation code_location)
      : test_case_name_(name), code_location_(code_location) {}

  // Test case base name for display purposes.
  virtual const std::string& GetTestCaseName() const { return test_case_name_; }
  // Test case id to verify identity.
  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
  // TEST_P macro uses AddTestPattern() to record information
  // about a single test in a LocalTestInfo structure.
  // test_case_name is the base name of the test case (without invocation
  // prefix). test_base_name is the name of an individual test without
  // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is
  // test case base name and DoBar is test base name.
  void AddTestPattern(const char* test_case_name,
                      const char* test_base_name,
                      TestMetaFactoryBase<ParamType>* meta_factory) {
    tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
                                                       test_base_name,
                                                       meta_factory)));
  }
  // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
  // about a generator.
  int AddTestCaseInstantiation(const std::string& instantiation_name,
                               GeneratorCreationFunc* func,
                               ParamNameGeneratorFunc* name_func,
                               const char* file, int line) {
    instantiations_.push_back(
        InstantiationInfo(instantiation_name, func, name_func, file, line));
    return 0;  // Return value used only to run this method in namespace scope.
  }
  // UnitTest class invokes this method to register tests in this test case
  // test cases right before running tests in RUN_ALL_TESTS macro.
  // This method should not be called more than once on any single
  // instance of a ParameterizedTestCaseInfoBase derived class.
  // UnitTest has a guard to prevent from calling this method more than once.
  virtual void RegisterTests() {
    for (typename TestInfoContainer::iterator test_it = tests_.begin();
         test_it != tests_.end(); ++test_it) {
      linked_ptr<TestInfo> test_info = *test_it;
      for (typename InstantiationContainer::iterator gen_it =
               instantiations_.begin(); gen_it != instantiations_.end();
               ++gen_it) {
        const std::string& instantiation_name = gen_it->name;
        ParamGenerator<ParamType> generator((*gen_it->generator)());
        ParamNameGeneratorFunc* name_func = gen_it->name_func;
        const char* file = gen_it->file;
        int line = gen_it->line;

        std::string test_case_name;
        if ( !instantiation_name.empty() )
          test_case_name = instantiation_name + "/";
        test_case_name += test_info->test_case_base_name;

        size_t i = 0;
        // Detects duplicate parameterized test names within one
        // instantiation.
        std::set<std::string> test_param_names;
        for (typename ParamGenerator<ParamType>::iterator param_it =
                 generator.begin();
             param_it != generator.end(); ++param_it, ++i) {
          Message test_name_stream;

          std::string param_name = name_func(
              TestParamInfo<ParamType>(*param_it, i));

          GTEST_CHECK_(IsValidParamName(param_name))
              << "Parameterized test name '" << param_name
              << "' is invalid, in " << file
              << " line " << line << std::endl;

          GTEST_CHECK_(test_param_names.count(param_name) == 0)
              << "Duplicate parameterized test name '" << param_name
              << "', in " << file << " line " << line << std::endl;

          test_param_names.insert(param_name);

          test_name_stream << test_info->test_base_name << "/" << param_name;
          MakeAndRegisterTestInfo(
              test_case_name.c_str(),
              test_name_stream.GetString().c_str(),
              NULL,  // No type parameter.
              PrintToString(*param_it).c_str(),
              code_location_,
              GetTestCaseTypeId(),
              TestCase::SetUpTestCase,
              TestCase::TearDownTestCase,
              test_info->test_meta_factory->CreateTestFactory(*param_it));
        }  // for param_it
      }  // for gen_it
    }  // for test_it
  }  // RegisterTests

 private:
  // LocalTestInfo structure keeps information about a single test registered
  // with TEST_P macro.
  struct TestInfo {
    TestInfo(const char* a_test_case_base_name,
             const char* a_test_base_name,
             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
        test_case_base_name(a_test_case_base_name),
        test_base_name(a_test_base_name),
        test_meta_factory(a_test_meta_factory) {}

    const std::string test_case_base_name;
    const std::string test_base_name;
    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
  };
  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
  // Records data received from INSTANTIATE_TEST_CASE_P macros:
  // <Instantiation name, Sequence generator creation function,
  //  Name generator function, Source file, Source line>
  struct InstantiationInfo {
    InstantiationInfo(const std::string &name_in,
                      GeneratorCreationFunc* generator_in,
                      ParamNameGeneratorFunc* name_func_in,
                      const char* file_in,
                      int line_in)
        : name(name_in),
          generator(generator_in),
          name_func(name_func_in),
          file(file_in),
          line(line_in) {}

    std::string name;
    GeneratorCreationFunc* generator;
    ParamNameGeneratorFunc* name_func;
    const char* file;
    int line;
  };
  typedef ::std::vector<InstantiationInfo> InstantiationContainer;

  // Returns true if name is non-empty and consists only of alphanumeric
  // characters and underscores.
  static bool IsValidParamName(const std::string& name) {
    // Check for empty string
    if (name.empty())
      return false;

    // Check for invalid characters
    for (std::string::size_type index = 0; index < name.size(); ++index) {
      // Cast to unsigned char first: passing a plain (possibly negative)
      // char, e.g. a byte of a UTF-8 sequence, to isalnum() is undefined
      // behavior per the C standard.
      if (!isalnum(static_cast<unsigned char>(name[index])) &&
          name[index] != '_')
        return false;
    }

    return true;
  }

  const std::string test_case_name_;
  CodeLocation code_location_;
  TestInfoContainer tests_;
  InstantiationContainer instantiations_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
};  // class ParameterizedTestCaseInfo
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
// macros use it to locate their corresponding ParameterizedTestCaseInfo
// descriptors.
class ParameterizedTestCaseRegistry {
 public:
  ParameterizedTestCaseRegistry() {}
  ~ParameterizedTestCaseRegistry() {
    // The registry owns all registered info objects.
    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
         it != test_case_infos_.end(); ++it) {
      delete *it;
    }
  }

  // Looks up or creates and returns a structure containing information about
  // tests and instantiations of a particular test case.
  template <class TestCase>
  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
      const char* test_case_name,
      CodeLocation code_location) {
    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
    // Linear search through the registered test cases by name.
    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
         it != test_case_infos_.end(); ++it) {
      if ((*it)->GetTestCaseName() == test_case_name) {
        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
          // Complain about incorrect usage of Google Test facilities
          // and terminate the program since we cannot guarantee correct
          // test case setup and tear-down in this case.
          ReportInvalidTestCaseType(test_case_name, code_location);
          posix::Abort();
        } else {
          // At this point we are sure that the object we found is of the same
          // type we are looking for, so we downcast it to that type
          // without further checks.
          typed_test_info = CheckedDowncastToActualType<
              ParameterizedTestCaseInfo<TestCase> >(*it);
        }
        break;
      }
    }
    if (typed_test_info == NULL) {
      // First time this test case is seen: create and remember its holder.
      typed_test_info = new ParameterizedTestCaseInfo<TestCase>(
          test_case_name, code_location);
      test_case_infos_.push_back(typed_test_info);
    }
    return typed_test_info;
  }
  void RegisterTests() {
    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
         it != test_case_infos_.end(); ++it) {
      (*it)->RegisterTests();
    }
  }

 private:
  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;

  TestCaseInfoContainer test_case_infos_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
};
} // namespace internal
} // namespace testing
#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python
"""Functions for downloading and reading MNIST data."""
import gzip
from six.moves import xrange
from six.moves.urllib.request import urlretrieve
import numpy
import os
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

  Args:
    filename: path to a gzipped MNIST-format image file.

  Returns:
    A numpy array of shape [num_images, rows, cols, 1] with dtype uint8.

  Raises:
    ValueError: if the file's magic number is not 2051.
  """
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    # Cast the big-endian uint32 header fields to Python ints so the
    # byte-count product below uses arbitrary-precision arithmetic and
    # cannot silently wrap in numpy's fixed-width uint32.
    num_images = int(_read32(bytestream))
    rows = int(_read32(bytestream))
    cols = int(_read32(bytestream))
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data
def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  count = labels_dense.shape[0]
  # Offset of each row's first element within the flattened matrix.
  row_starts = numpy.arange(count) * num_classes
  one_hot = numpy.zeros((count, num_classes))
  # Set one flat position per row: row start plus the label's column.
  one_hot.flat[row_starts + labels_dense.ravel()] = 1
  return one_hot
def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index].

  Args:
    filename: path to a gzipped MNIST-format label file.
    one_hot: if True, return a [num_items, 10] one-hot matrix instead.

  Returns:
    A uint8 numpy array of labels, or a one-hot matrix when requested.

  Raises:
    ValueError: if the file's magic number is not 2049.
  """
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    # Cast to a Python int for the read() byte count, matching
    # extract_images() and avoiding numpy uint32 arithmetic.
    num_items = int(_read32(bytestream))
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels
class DataSet(object):
    """In-memory dataset of flattened, normalized images and their labels."""

    def __init__(self, images, labels, fake_data=False):
        # fake_data mode skips all preprocessing and only fixes the epoch size.
        if fake_data:
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                "images.shape: %s labels.shape: %s" % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0  # full passes over the data so far
        self._index_in_epoch = 0    # cursor position within the current epoch

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1.0 for _ in xrange(784)]
            fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [
                fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data (images and labels with the same permutation)
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            # NOTE(review): batches never span an epoch boundary — the tail of
            # the previous epoch is dropped, so batch_size must fit in one epoch.
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
def read_data_sets(train_images_file, train_labels_file, test_images_file, test_labels_file, fake_data=False, one_hot=False, validation_size=5000):
    """Load MNIST-format train/test files into train/validation/test DataSets.

    Args:
        train_images_file: path to the gzipped training-images file.
        train_labels_file: path to the gzipped training-labels file.
        test_images_file: path to the gzipped test-images file.
        test_labels_file: path to the gzipped test-labels file.
        fake_data: if True, return synthetic DataSets without any file I/O.
        one_hot: if True, labels are returned as one-hot vectors.
        validation_size: number of leading training examples held out for
            validation (was a hard-coded constant of 5000).

    Returns:
        An object with `train`, `validation` and `test` DataSet attributes.

    Raises:
        ValueError: if validation_size is negative or exceeds the number
            of training examples.
    """
    class DataSets(object):
        pass
    data_sets = DataSets()
    if fake_data:
        data_sets.train = DataSet([], [], fake_data=True)
        data_sets.validation = DataSet([], [], fake_data=True)
        data_sets.test = DataSet([], [], fake_data=True)
        return data_sets
    train_images = extract_images(train_images_file)
    train_labels = extract_labels(train_labels_file, one_hot=one_hot)
    test_images = extract_images(test_images_file)
    test_labels = extract_labels(test_labels_file, one_hot=one_hot)
    if not 0 <= validation_size <= train_images.shape[0]:
        raise ValueError(
            "validation_size=%d must be between 0 and %d"
            % (validation_size, train_images.shape[0]))
    # Hold out the first `validation_size` examples for validation.
    data_sets.validation = DataSet(train_images[:validation_size],
                                   train_labels[:validation_size])
    data_sets.train = DataSet(train_images[validation_size:],
                              train_labels[validation_size:])
    data_sets.test = DataSet(test_images, test_labels)
    return data_sets
| {
"pile_set_name": "Github"
} |
<template lang="pug">
div
el-button(type="primary" @click="tab") New tab
</template>
<script>
// Popup component: renders a single button that opens the extension's
// bundled app page in a new browser tab.
export default {
  data: () => ({
  }),
  computed: { },
  created () { },
  mounted () { },
  methods: {
    // Open the packaged app page (pages/app.html) in a new tab via the
    // Chrome extension tabs API.
    tab () {
      chrome.tabs.create({ url: 'pages/app.html' })
    }
  }
}
</script>
<style lang="scss">
div {
color: blue
}
</style> | {
"pile_set_name": "Github"
} |
# Google Test #
[](https://travis-ci.org/google/googletest)
Welcome to **Google Test**, Google's C++ test framework!
This repository is a merger of the formerly separate GoogleTest and
GoogleMock projects. These were so closely related that it makes sense to
maintain and release them together.
Please see the project page above for more information as well as the
mailing list for questions, discussions, and development. There is
also an IRC channel on OFTC (irc.oftc.net) #gtest available. Please
join us!
**Google Mock** is an extension to Google Test for writing and using C++ mock
classes. See the separate [Google Mock documentation](googlemock/README.md).
More detailed documentation for googletest (including build instructions) are
in its interior [googletest/README.md](googletest/README.md) file.
## Features ##
* An [XUnit](https://en.wikipedia.org/wiki/XUnit) test framework.
* Test discovery.
* A rich set of assertions.
* User-defined assertions.
* Death tests.
* Fatal and non-fatal failures.
* Value-parameterized tests.
* Type-parameterized tests.
* Various options for running the tests.
* XML test report generation.
## Platforms ##
Google test has been used on a variety of platforms:
* Linux
* Mac OS X
* Windows
* Cygwin
* MinGW
* Windows Mobile
* Symbian
## Who Is Using Google Test? ##
In addition to many internal projects at Google, Google Test is also used by
the following notable projects:
* The [Chromium projects](http://www.chromium.org/) (behind the Chrome
browser and Chrome OS).
* The [LLVM](http://llvm.org/) compiler.
* [Protocol Buffers](https://github.com/google/protobuf), Google's data
interchange format.
* The [OpenCV](http://opencv.org/) computer vision library.
## Related Open Source Projects ##
[Google Test UI](https://github.com/ospector/gtest-gbar) is a test runner that runs
your test binary, allows you to track its progress via a progress bar, and
displays a list of test failures. Clicking on one shows failure text. Google
Test UI is written in C#.
[GTest TAP Listener](https://github.com/kinow/gtest-tap-listener) is an event
listener for Google Test that implements the
[TAP protocol](https://en.wikipedia.org/wiki/Test_Anything_Protocol) for test
result output. If your test runner understands TAP, you may find it useful.
## Requirements ##
Google Test is designed to have fairly minimal requirements to build
and use with your projects, but there are some. Currently, we support
Linux, Windows, Mac OS X, and Cygwin. We will also make our best
effort to support other platforms (e.g. Solaris, AIX, and z/OS).
However, since core members of the Google Test project have no access
to these platforms, Google Test may have outstanding issues there. If
you notice any problems on your platform, please notify
<[email protected]>. Patches for fixing them are
even more welcome!
### Linux Requirements ###
These are the base requirements to build and use Google Test from a source
package (as described below):
* GNU-compatible Make or gmake
* POSIX-standard shell
* POSIX(-2) Regular Expressions (regex.h)
* A C++98-standard-compliant compiler
### Windows Requirements ###
* Microsoft Visual C++ v7.1 or newer
### Cygwin Requirements ###
* Cygwin v1.5.25-14 or newer
### Mac OS X Requirements ###
* Mac OS X v10.4 Tiger or newer
* XCode Developer Tools
### Requirements for Contributors ###
We welcome patches. If you plan to contribute a patch, you need to
build Google Test and its own tests from a git checkout (described
below), which has further requirements:
* [Python](https://www.python.org/) v2.3 or newer (for running some of
the tests and re-generating certain source files from templates)
* [CMake](https://cmake.org/) v2.6.4 or newer
## Regenerating Source Files ##
Some of Google Test's source files are generated from templates (not
in the C++ sense) using a script.
For example, the
file include/gtest/internal/gtest-type-util.h.pump is used to generate
gtest-type-util.h in the same directory.
You don't need to worry about regenerating the source files
unless you need to modify them. You would then modify the
corresponding `.pump` files and run the '[pump.py](googletest/scripts/pump.py)'
generator script. See the [Pump Manual](googletest/docs/PumpManual.md).
### Contributing Code ###
We welcome patches. Please read the
[Developer's Guide](googletest/docs/DevGuide.md)
for how you can contribute. In particular, make sure you have signed
the Contributor License Agreement, or we won't be able to accept the
patch.
Happy testing!
| {
"pile_set_name": "Github"
} |
require "spec_helper"
# Specs for CC::Config::YAMLAdapter: the loader that normalizes legacy
# .codeclimate.yml keys ("engines", "exclude_paths") into the current
# configuration schema ("plugins", "exclude_patterns").
describe CC::Config::YAMLAdapter do
  describe "#engines" do
    it "moves engines to plugins" do
      yaml = load_cc_yaml(<<-EOYAML)
        engines:
          rubocop:
            enabled: true
      EOYAML
      expect(yaml.config).to eq(
        "plugins" => {
          "rubocop" => { "enabled" => true }
        }
      )
    end
    it "includes enabled plugins" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop:
            enabled: true
          eslint:
            enabled: true
          tslint:
            enabled: false
      EOYAML
      # Disabled plugins are kept in the config rather than filtered out.
      expect(yaml.config["plugins"].length).to eq(3)
      expect(yaml.config["plugins"].keys).to eq(
        %w[rubocop eslint tslint],
      )
    end
    it "supports a plugin:true|false shorthand" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop: true
          eslint: false
      EOYAML
      plugins = yaml.config["plugins"]
      expect(plugins["rubocop"]).to eq("enabled" => true)
      expect(plugins["eslint"]).to eq("enabled" => false)
    end
    it "respects channel, and config" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop:
            enabled: true
            channel: beta
            config:
              yo: "sup"
      EOYAML
      _, config = yaml.config["plugins"].detect { |name, _| name == "rubocop" }
      expect(config).to eq(
        "enabled" => true, "channel" => "beta", "config" => { "yo" => "sup" },
      )
    end
    it "re-writes as legacy file config values" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop:
            enabled: true
            config:
              file: "foo.rb"
      EOYAML
      # A config hash containing only "file" collapses to the legacy string form.
      _, config = yaml.config["plugins"].detect { |name, _| name == "rubocop" }
      expect(config).to eq(
        "enabled" => true, "config" => "foo.rb",
      )
    end
    it "respects legacy file config values" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop:
            enabled: true
            config: "foo.rb"
      EOYAML
      _, config = yaml.config["plugins"].detect { |name, _| name == "rubocop" }
      expect(config).to eq(
        "enabled" => true, "config" => "foo.rb",
      )
    end
    it "updates legacy engine excludes" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop:
            exclude_paths:
            - foo
      EOYAML
      _, config = yaml.config["plugins"].detect { |name, _| name == "rubocop" }
      expect(config).to eq(
        "exclude_patterns" => ["foo"],
      )
    end
    it "does not overwrite engine excludes with legacy" do
      yaml = load_cc_yaml(<<-EOYAML)
        plugins:
          rubocop:
            exclude_paths:
            - bar
            exclude_patterns:
            - foo
      EOYAML
      # When both keys are present the modern key wins; the legacy key is
      # passed through untouched.
      _, config = yaml.config["plugins"].detect { |name, _| name == "rubocop" }
      expect(config).to eq(
        "exclude_paths" => ["bar"],
        "exclude_patterns" => ["foo"],
      )
    end
  end
  describe "#exclude_patterns" do
    it "uses explicitly-configured excludes when defined" do
      yaml = load_cc_yaml(<<-EOYAML)
        exclude_patterns:
        - "**/*.rb"
        - foo/
      EOYAML
      expect(yaml.config["exclude_patterns"]).to eq(%w[**/*.rb foo/])
    end
    it "converts legacy exclude_paths" do
      yaml = load_cc_yaml(<<-EOYAML)
        exclude_paths:
        - "**/*.rb"
        - foo/
      EOYAML
      expect(yaml.config["exclude_patterns"]).to eq(%w[**/*.rb foo/])
    end
    it "converts legacy engine exclude_paths from a string" do
      yaml = load_cc_yaml(<<-EOYAML)
        engines:
          foo:
            exclude_paths:
            - "**/*.rb"
            - foo/
      EOYAML
      expect(yaml.config["plugins"]["foo"]["exclude_patterns"]).to eq(%w[**/*.rb foo/])
    end
    it "converts legacy engine exclude_paths" do
      yaml = load_cc_yaml(<<-EOYAML)
        engines:
          foo:
            exclude_paths:
              foo/
      EOYAML
      expect(yaml.config["plugins"]["foo"]["exclude_patterns"]).to eq(%w[foo/])
    end
  end
  # Writes the given YAML to a tempfile and loads it through the adapter
  # under test (described_class).
  def load_cc_yaml(yaml)
    Tempfile.open("") do |tmp|
      tmp.puts(yaml)
      tmp.rewind
      described_class.load(tmp.path)
    end
  end
end
| {
"pile_set_name": "Github"
} |
import logging
from redash.destinations import *
enabled = True
try:
import pypd
except ImportError:
enabled = False
class PagerDuty(BaseDestination):
    """Redash alert destination that forwards alert state changes to
    PagerDuty through the Events API v2 (via the optional pypd client)."""

    # Template for the PagerDuty incident/dedup key.
    KEY_STRING = "{alert_id}_{query_id}"
    # Default event summary when no custom subject or description is set.
    DESCRIPTION_STR = "Alert: {alert_name}"

    @classmethod
    def enabled(cls):
        # Usable only when the optional pypd dependency imported successfully.
        return enabled

    @classmethod
    def configuration_schema(cls):
        return {
            "type": "object",
            "properties": {
                "integration_key": {
                    "type": "string",
                    "title": "PagerDuty Service Integration Key",
                },
                "description": {
                    "type": "string",
                    "title": "Description for the event, defaults to alert name",
                },
            },
            "required": ["integration_key"],
        }

    @classmethod
    def icon(cls):
        return "creative-commons-pd-alt"

    def notify(self, alert, query, user, new_state, app, host, options):
        # An unknown state maps to no PagerDuty action at all.
        if new_state == "unknown":
            logging.info("Unknown state, doing nothing")
            return

        # Summary precedence: custom subject, then configured description,
        # then the generic default built from the alert name.
        summary = (
            alert.custom_subject
            or options.get("description")
            or self.DESCRIPTION_STR.format(alert_name=alert.name)
        )

        key = self.KEY_STRING.format(alert_id=alert.id, query_id=query.id)
        event = {
            "routing_key": options.get("integration_key"),
            "incident_key": key,
            "dedup_key": key,
            "payload": {
                "summary": summary,
                "severity": "error",
                "source": "redash",
            },
        }
        if alert.custom_body:
            event["payload"]["custom_details"] = alert.custom_body
        # "triggered" opens an incident; any other known state resolves it.
        event["event_action"] = "trigger" if new_state == "triggered" else "resolve"

        try:
            created = pypd.EventV2.create(data=event)
            logging.warning(created)
        except Exception:
            logging.exception("PagerDuty trigger failed!")
register(PagerDuty)
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Testcase, bug 430813</title>
<style type="text/css">
table, td { border-spacing: 0; margin: 0; padding: 0; }
td { background: aqua; }
</style>
</head>
<body style="border: 1px solid; width: 500px; height: 600px;">
<table style="margin-right: 50px"><tr><td>
This should be near the top of the page, not 200px down.
blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah blah
</td></tr></table>
</body></html>
| {
"pile_set_name": "Github"
} |
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\\rightarrow$Run All).\n",
"\n",
"Make sure you fill in any place that says `YOUR CODE HERE` or \"YOUR ANSWER HERE\", as well as your name and collaborators below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"NAME = \"Ben Bitdiddle\"\n",
"COLLABORATORS = \"Alyssa P. Hacker\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---"
]
},
{
"cell_type": "markdown",
"metadata": {
"nbgrader": {
"cell_type": "markdown",
"checksum": "535c21960d4663d5edac398cb445d087",
"grade": false,
"grade_id": "jupyter",
"locked": true,
"schema_version": 3,
"solution": false
}
},
"source": [
"For this problem set, we'll be using the Jupyter notebook:\n",
"\n",
""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"## Part A (2 points)\n",
"\n",
"Write a function that returns a list of numbers, such that $x_i=i^2$, for $1\\leq i \\leq n$. Make sure it handles the case where $n<1$ by raising a `ValueError`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "8f1eab8d02a9520920aa06f8a86a2492",
"grade": false,
"grade_id": "squares",
"locked": false,
"schema_version": 3,
"solution": true
}
},
"outputs": [],
"source": [
"def squares(n):\n",
" \"\"\"Compute the squares of numbers from 1 to n, such that the \n",
" ith element of the returned list equals i^2.\n",
" \n",
" \"\"\"\n",
" if n < 1:\n",
" raise ValueError\n",
" s = []\n",
" for i in range(n):\n",
" s.append(i**2)\n",
" return s"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Your function should print `[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]` for $n=10$. Check that it does:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"squares(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "8e029652317e6c6a37a72710dc8d2429",
"grade": true,
"grade_id": "correct_squares",
"locked": false,
"points": 1.0,
"schema_version": 3,
"solution": false
}
},
"outputs": [],
"source": [
"# \"\"\"Check that squares returns the correct output for several inputs\"\"\"\n",
"# assert squares(1) == [1]\n",
"# assert squares(2) == [1, 4]\n",
"# assert squares(10) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n",
"# assert squares(11) == [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "c6ff383fa27ce1c2eb97789816c93069",
"grade": true,
"grade_id": "squares_invalid_input",
"locked": false,
"points": 1.0,
"schema_version": 3,
"solution": false
}
},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"\n",
"## Part B (1 point)\n",
"\n",
"Using your `squares` function, write a function that computes the sum of the squares of the numbers from 1 to $n$. Your function should call the `squares` function -- it should NOT reimplement its functionality."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "166e1abc6621f93f8084ec312c49f757",
"grade": false,
"grade_id": "sum_of_squares",
"locked": false,
"schema_version": 3,
"solution": true
}
},
"outputs": [],
"source": [
"def sum_of_squares(n):\n",
" \"\"\"Compute the sum of the squares of numbers from 1 to n.\"\"\"\n",
" total = 0\n",
" s = squares(n)\n",
" for i in range(len(s)):\n",
" total += s[i]\n",
" return total"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The sum of squares from 1 to 10 should be 385. Verify that this is the answer you get:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"sum_of_squares(10)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "8cd2b086064694cb2352074a421b8964",
"grade": true,
"grade_id": "correct_sum_of_squares",
"locked": false,
"points": 0.5,
"schema_version": 3,
"solution": false
},
"tags": [
"raises-exception"
]
},
"outputs": [],
"source": [
"\"\"\"Check that sum_of_squares returns the correct answer for various inputs.\"\"\"\n",
"assert sum_of_squares(1) == 1\n",
"assert sum_of_squares(2) == 5\n",
"assert sum_of_squares(10) == 385\n",
"assert sum_of_squares(11) == 506"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "b1e1ec376ddc63d803b109befacde06a",
"grade": true,
"grade_id": "sum_of_squares_uses_squares",
"locked": false,
"points": 0.5,
"schema_version": 3,
"solution": false
}
},
"outputs": [],
"source": [
"\"\"\"Check that sum_of_squares relies on squares.\"\"\"\n",
"orig_squares = squares\n",
"del squares\n",
"try:\n",
" sum_of_squares(1)\n",
"except NameError:\n",
" pass\n",
"else:\n",
" raise AssertionError(\"sum_of_squares does not use squares\")\n",
"finally:\n",
" squares = orig_squares"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"## Part C (1 point)\n",
"\n",
"Using LaTeX math notation, write out the equation that is implemented by your `sum_of_squares` function."
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": false,
"nbgrader": {
"cell_type": "markdown",
"checksum": "f3cc38a3e522c0be10852ebbe2a638b7",
"grade": true,
"grade_id": "sum_of_squares_equation",
"locked": false,
"points": 1.0,
"schema_version": 3,
"solution": true
}
},
"source": [
"$\\sum_{i=1}^{n} i^2$"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"---\n",
"## Part D (2 points)\n",
"\n",
"Find a usecase for your `sum_of_squares` function and implement that usecase in the cell below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true,
"deletable": false,
"nbgrader": {
"cell_type": "code",
"checksum": "5a96910dbc324f5565edf92f5c98af1b",
"grade": true,
"grade_id": "sum_of_squares_application",
"locked": false,
"points": 2.0,
"schema_version": 3,
"solution": true
},
"tags": [
"raises-exception"
]
},
"outputs": [],
"source": [
"# YOUR CODE HERE\n",
"raise NotImplementedError()"
]
},
{
"cell_type": "markdown",
"metadata": {
"nbgrader": {
"grade": false,
"grade_id": "cell-938593c4a215c6cc",
"locked": true,
"points": 4,
"schema_version": 3,
"solution": false,
"task": true
}
},
"source": [
"---\n",
"## Part E (4 points)\n",
"\n",
"State the formulae for an arithmetic and geometric sum and verify them numerically for an example of your choice."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"$\\sum_{i=0}^{\\infty} x^i = \\frac{1}{1-x} \\quad \\text{for } |x| < 1$"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python",
"language": "python",
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
| {
"pile_set_name": "Github"
} |
{
"component": true
}
| {
"pile_set_name": "Github"
} |
/**
* \file gcm.h
*
* \brief Galois/Counter mode for 128-bit block ciphers
*
* Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of mbed TLS (https://tls.mbed.org)
*/
#ifndef MBEDTLS_GCM_H
#define MBEDTLS_GCM_H
#include "cipher.h"
#include <stdint.h>
#define MBEDTLS_GCM_ENCRYPT 1
#define MBEDTLS_GCM_DECRYPT 0
#define MBEDTLS_ERR_GCM_AUTH_FAILED -0x0012 /**< Authenticated decryption failed. */
#define MBEDTLS_ERR_GCM_BAD_INPUT -0x0014 /**< Bad input parameters to function. */
#ifdef __cplusplus
extern "C" {
#endif
/**
* \brief GCM context structure
*/
typedef struct {
mbedtls_cipher_context_t cipher_ctx;/*!< cipher context used */
uint64_t HL[16]; /*!< Precalculated HTable */
uint64_t HH[16]; /*!< Precalculated HTable */
uint64_t len; /*!< Total data length */
uint64_t add_len; /*!< Total add length */
unsigned char base_ectr[16];/*!< First ECTR for tag */
unsigned char y[16]; /*!< Y working value */
unsigned char buf[16]; /*!< buf working value */
int mode; /*!< Encrypt or Decrypt */
}
mbedtls_gcm_context;
/**
* \brief Initialize GCM context (just makes references valid)
* Makes the context ready for mbedtls_gcm_setkey() or
* mbedtls_gcm_free().
*
* \param ctx GCM context to initialize
*/
void mbedtls_gcm_init( mbedtls_gcm_context *ctx );
/**
* \brief GCM initialization (encryption)
*
* \param ctx GCM context to be initialized
* \param cipher cipher to use (a 128-bit block cipher)
* \param key encryption key
* \param keybits must be 128, 192 or 256
*
* \return 0 if successful, or a cipher specific error code
*/
int mbedtls_gcm_setkey( mbedtls_gcm_context *ctx,
mbedtls_cipher_id_t cipher,
const unsigned char *key,
unsigned int keybits );
/**
* \brief GCM buffer encryption/decryption using a block cipher
*
* \note On encryption, the output buffer can be the same as the input buffer.
* On decryption, the output buffer cannot be the same as input buffer.
* If buffers overlap, the output buffer must trail at least 8 bytes
* behind the input buffer.
*
* \param ctx GCM context
* \param mode MBEDTLS_GCM_ENCRYPT or MBEDTLS_GCM_DECRYPT
* \param length length of the input data
* \param iv initialization vector
* \param iv_len length of IV
* \param add additional data
* \param add_len length of additional data
* \param input buffer holding the input data
* \param output buffer for holding the output data
* \param tag_len length of the tag to generate
* \param tag buffer for holding the tag
*
* \return 0 if successful
*/
int mbedtls_gcm_crypt_and_tag( mbedtls_gcm_context *ctx,
int mode,
size_t length,
const unsigned char *iv,
size_t iv_len,
const unsigned char *add,
size_t add_len,
const unsigned char *input,
unsigned char *output,
size_t tag_len,
unsigned char *tag );
/**
* \brief GCM buffer authenticated decryption using a block cipher
*
* \note On decryption, the output buffer cannot be the same as input buffer.
* If buffers overlap, the output buffer must trail at least 8 bytes
* behind the input buffer.
*
* \param ctx GCM context
* \param length length of the input data
* \param iv initialization vector
* \param iv_len length of IV
* \param add additional data
* \param add_len length of additional data
* \param tag buffer holding the tag
* \param tag_len length of the tag
* \param input buffer holding the input data
* \param output buffer for holding the output data
*
* \return 0 if successful and authenticated,
* MBEDTLS_ERR_GCM_AUTH_FAILED if tag does not match
*/
int mbedtls_gcm_auth_decrypt( mbedtls_gcm_context *ctx,
size_t length,
const unsigned char *iv,
size_t iv_len,
const unsigned char *add,
size_t add_len,
const unsigned char *tag,
size_t tag_len,
const unsigned char *input,
unsigned char *output );
/**
* \brief Generic GCM stream start function
*
* \param ctx GCM context
* \param mode MBEDTLS_GCM_ENCRYPT or MBEDTLS_GCM_DECRYPT
* \param iv initialization vector
* \param iv_len length of IV
* \param add additional data (or NULL if length is 0)
* \param add_len length of additional data
*
* \return 0 if successful
*/
int mbedtls_gcm_starts( mbedtls_gcm_context *ctx,
int mode,
const unsigned char *iv,
size_t iv_len,
const unsigned char *add,
size_t add_len );
/**
* \brief Generic GCM update function. Encrypts/decrypts using the
* given GCM context. Expects input to be a multiple of 16
* bytes! Only the last call before mbedtls_gcm_finish() can be less
* than 16 bytes!
*
* \note On decryption, the output buffer cannot be the same as input buffer.
* If buffers overlap, the output buffer must trail at least 8 bytes
* behind the input buffer.
*
* \param ctx GCM context
* \param length length of the input data
* \param input buffer holding the input data
* \param output buffer for holding the output data
*
* \return 0 if successful or MBEDTLS_ERR_GCM_BAD_INPUT
*/
int mbedtls_gcm_update( mbedtls_gcm_context *ctx,
size_t length,
const unsigned char *input,
unsigned char *output );
/**
* \brief Generic GCM finalisation function. Wraps up the GCM stream
* and generates the tag. The tag can have a maximum length of
* 16 bytes.
*
* \param ctx GCM context
* \param tag buffer for holding the tag (may be NULL if tag_len is 0)
* \param tag_len length of the tag to generate
*
* \return 0 if successful or MBEDTLS_ERR_GCM_BAD_INPUT
*/
int mbedtls_gcm_finish( mbedtls_gcm_context *ctx,
unsigned char *tag,
size_t tag_len );
/**
* \brief Free a GCM context and underlying cipher sub-context
*
* \param ctx GCM context to free
*/
void mbedtls_gcm_free( mbedtls_gcm_context *ctx );
/**
* \brief Checkup routine
*
* \return 0 if successful, or 1 if the test failed
*/
int mbedtls_gcm_self_test( int verbose );
#ifdef __cplusplus
}
#endif
#endif /* gcm.h */
| {
"pile_set_name": "Github"
} |
#!/bin/sh
# Benchmark the locally built amber (ambs) release binary against the
# installed ambs, ripgrep (rg), and GNU grep using hyperfine.
# --no-parent-ignore keeps ambs from consulting parent ignore files so
# all tools search the same file set.
dev="./target/release/ambs --no-parent-ignore"
grep="grep --binary-files=without-match --color=auto -r"
ambs="ambs --no-parent-ignore"
rg="rg --no-heading --no-line-number"
# Common ASCII pattern with many matches (Linux kernel tree).
hyperfine --warmup 3 "$dev EXPORT_SYMBOL_GPL ./data/linux" \
"$ambs EXPORT_SYMBOL_GPL ./data/linux" \
"$rg EXPORT_SYMBOL_GPL ./data/linux" \
"$grep EXPORT_SYMBOL_GPL ./data/linux"
# Rare ASCII pattern with few matches (Linux kernel tree).
hyperfine --warmup 3 "$dev irq_bypass_register_producer ./data/linux" \
"$ambs irq_bypass_register_producer ./data/linux" \
"$rg irq_bypass_register_producer ./data/linux" \
"$grep irq_bypass_register_producer ./data/linux"
# Multibyte (Japanese) pattern in a single large XML dump.
hyperfine --warmup 3 "$dev 検索結果 ./data/jawiki-latest-pages-articles.xml" \
"$ambs 検索結果 ./data/jawiki-latest-pages-articles.xml" \
"$rg 検索結果 ./data/jawiki-latest-pages-articles.xml" \
"$grep 検索結果 ./data/jawiki-latest-pages-articles.xml"
# ASCII phrase (quoted, with a space) in the same large XML dump.
hyperfine --warmup 3 "$dev \"Quick Search\" ./data/jawiki-latest-pages-articles.xml" \
"$ambs \"Quick Search\" ./data/jawiki-latest-pages-articles.xml" \
"$rg \"Quick Search\" ./data/jawiki-latest-pages-articles.xml" \
"$grep \"Quick Search\" ./data/jawiki-latest-pages-articles.xml"
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright (c) Microsoft Open Technologies, Inc.
* All Rights Reserved
* Licensed under the Apache License, Version 2.0.
* See License.txt in the project root for license information.
******************************************************************************/
package com.microsoft.aad.adal;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.PluginResult;
import org.json.JSONException;
import org.json.JSONObject;
import static com.microsoft.aad.adal.SimpleSerialization.authenticationResultToJSON;
/**
* Class that provides implementation for passing AuthenticationResult from acquireToken* methods
* to Cordova JS code
*/
class DefaultAuthenticationCallback implements AuthenticationCallback<AuthenticationResult> {
/**
* Private field that stores cordova callback context which is used to send results back to JS
*/
private final CallbackContext callbackContext;
/**
* Default constructor
* @param callbackContext Cordova callback context which is used to send results back to JS
*/
DefaultAuthenticationCallback(CallbackContext callbackContext){
this.callbackContext = callbackContext;
}
/**
* Success callback that serializes AuthenticationResult instance and passes it to Cordova
* @param authResult AuthenticationResult instance
*/
@Override
public void onSuccess(AuthenticationResult authResult) {
JSONObject result;
try {
result = authenticationResultToJSON(authResult);
callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.OK, result));
} catch (JSONException e) {
callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.JSON_EXCEPTION,
"Failed to serialize Authentication result"));
}
}
/**
* Error callback that passes error to Cordova
* @param e AuthenticationException
*/
@Override
public void onError(Exception e) {
callbackContext.sendPluginResult(new PluginResult(PluginResult.Status.ERROR, e.getMessage()));
}
}
| {
"pile_set_name": "Github"
} |
-----BEGIN CERTIFICATE REQUEST-----
MIIBGDCBvwIBADA0MQswCQYDVQQGEwJOTDERMA8GA1UEChMIUG9sYXJTU0wxEjAQ
BgNVBAMTCWxvY2FsaG9zdDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDfMVtl2
CR5acj7HWS3/IG7ufPkGkXTQrRS192giWWKSTuUA2CMR/+ov0jRdXRa9iojCa3cN
Vc2KKg76Aci07f+gKTAnBgkqhkiG9w0BCQ4xGjAYMAkGA1UdEwQCMAAwCwYDVR0P
BAQDAgXgMAoGCCqGSM49BAMEA0gAMEUCIQD8xdtluTiBJM50d/WvDeUvPbXOUMlL
8xEJXU2WOK+RLAIgS8U6Z8tlJpXLEisz/j4gdABG3Y3h4PBJjlpszFisTNo=
-----END CERTIFICATE REQUEST-----
| {
"pile_set_name": "Github"
} |
# <examples/doc_confidence_basic.py>
import numpy as np
import lmfit
# Synthetic data: y = 1/(a*x) + b with a=0.1, b=2, plus Gaussian noise.
# The seed is fixed so the example output is reproducible.
x = np.linspace(0.3, 10, 100)
np.random.seed(0)
y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)
# Fit parameters with their initial guesses.
pars = lmfit.Parameters()
pars.add_many(('a', 0.1), ('b', 1))
def residual(p):
    # Residual of the model 1/(a*x) + b against the observed data.
    return 1/(p['a']*x) + p['b'] - y
# Least-squares fit, then an explicit confidence-interval calculation
# (conf_interval profiles each parameter rather than using the covariance).
mini = lmfit.Minimizer(residual, pars)
result = mini.minimize()
print(lmfit.fit_report(result.params))
ci = lmfit.conf_interval(mini, result)
lmfit.printfuncs.report_ci(ci)
# <end examples/doc_confidence_basic.py>
| {
"pile_set_name": "Github"
} |
X:1
T:Ebb Tide -- Hornpipe
R:hornpipe
B:Cole's 1000 Fiddle Tunes
M:2/4
L:1/16
K:C
uG2|c2(c"4"e) gage|gage gage|fafd egec|defd cBAG|
c2(ce) gage|gage gage|fafd egec|BdGB c2:|
(uef)|gece geag|fafd B2(de)|fdBG DGBd|"4"ecGE C2(ef)|
gece g"4"c'eg|agfe d2(de)|fagf edcB|[c2E2][c2E2][c2E2]:|
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2012 - 2020 Splice Machine, Inc.
*
* This file is part of Splice Machine.
* Splice Machine is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either
* version 3, or (at your option) any later version.
* Splice Machine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License along with Splice Machine.
* If not, see <http://www.gnu.org/licenses/>.
*/
package com.splicemachine.derby.impl.sql.execute.operations.microstrategy;
import splice.com.google.common.collect.Sets;
import com.splicemachine.derby.test.framework.SpliceSchemaWatcher;
import com.splicemachine.derby.test.framework.SpliceWatcher;
import com.splicemachine.derby.test.framework.tables.SpliceCustomerTable;
import com.splicemachine.test.suites.MicrostrategiesTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.List;
import java.util.Set;
import static com.splicemachine.derby.test.framework.SpliceUnitTest.getResourceDirectory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@Category(MicrostrategiesTests.class)
public class MicrostrategiesCustomerIT {

    private static final String SCHEMA = MicrostrategiesCustomerIT.class.getSimpleName().toUpperCase();

    @ClassRule
    public static SpliceSchemaWatcher spliceSchemaWatcher = new SpliceSchemaWatcher(SCHEMA);

    @ClassRule
    public static SpliceWatcher spliceClassWatcher = new SpliceWatcher(SCHEMA);

    @Rule
    public SpliceWatcher methodWatcher = new SpliceWatcher(SCHEMA);

    /** Creates the shared customer table A and loads the initial CSV fixture. */
    @BeforeClass
    public static void createSharedTableAndImportData() throws Exception {
        spliceClassWatcher.executeUpdate("create table A" + SpliceCustomerTable.CREATE_STRING);
        doImport();
    }

    /**
     * Imports customer_iso.csv into table A via SYSCS_UTIL.IMPORT_DATA.
     *
     * Fix: the original leaked the PreparedStatement and ResultSet when the
     * import call threw; try-with-resources guarantees both are closed.
     */
    private static void doImport() throws Exception {
        try (PreparedStatement ps = spliceClassWatcher.prepareStatement(
                "call SYSCS_UTIL.IMPORT_DATA (?, ?, null,?,',',null,null,null,null,1,null,true,null)")) {
            ps.setString(1, SCHEMA);
            ps.setString(2, "A");
            ps.setString(3, getResourceDirectory() + "customer_iso.csv");
            try (ResultSet rs = ps.executeQuery()) {
                // Drain the status rows returned by IMPORT_DATA; their content is not needed.
                while (rs.next()) {
                }
            }
        }
    }

    /**
     * Repeated select-distinct runs must stay stable, and re-importing the
     * same data must not change the distinct result.
     */
    @Test
    public void testRepeatedSelectDistinct() throws Exception {
        for (int i = 1; i <= 10; i++) {
            testSelectDistinct();
            if (i % 3 == 0) {
                // additional imports should not affect select distinct
                doImport();
            }
        }
    }

    /** select distinct must return exactly the 184 unique, already de-duplicated city ids. */
    @Test
    public void testSelectDistinct() throws Exception {
        List<Integer> allCityIds = methodWatcher.queryList("select distinct cst_city_id from A");
        Set<Integer> uniqueCityIds = Sets.newHashSet(allCityIds);
        assertFalse("No City ids found!", uniqueCityIds.isEmpty());
        // "distinct" already de-duplicated, so converting to a set must not shrink the list.
        assertEquals(allCityIds.size(), uniqueCityIds.size());
        assertEquals(184, uniqueCityIds.size());
    }
}
| {
"pile_set_name": "Github"
} |
---
id: version-4.0-extend_type
title: Extending a type
sidebar_label: Extending a type
original_id: extend_type
---
Fields exposed in a GraphQL type do not all need to be part of the same class.
Use the `@ExtendType` annotation to add additional fields to a type that is already declared.
<div class="alert alert-info">
Extending a type has nothing to do with type inheritance.
If you are looking for a way to expose a class and its children classes, have a look at
the <a href="inheritance-interfaces">Inheritance</a> section.
</div>
Let's assume you have a `Product` class. In order to get the name of a product, there is no `getName()` method in
the product because the name needs to be translated in the correct language. You have a `TranslationService` to do that.
```php
namespace App\Entities;
use TheCodingMachine\GraphQLite\Annotations\Field;
use TheCodingMachine\GraphQLite\Annotations\Type;
/**
* @Type()
*/
class Product
{
// ...
/**
* @Field()
*/
public function getId(): string
{
return $this->id;
}
/**
* @Field()
*/
public function getPrice(): ?float
{
return $this->price;
}
}
```
```php
// You need to use a service to get the name of the product in the correct language.
$name = $translationService->getProductName($productId, $language);
```
Using `@ExtendType`, you can add an additional `name` field to your product:
```php
namespace App\Types;
use TheCodingMachine\GraphQLite\Annotations\ExtendType;
use TheCodingMachine\GraphQLite\Annotations\Field;
use App\Entities\Product;
/**
* @ExtendType(class=Product::class)
*/
class ProductType
{
private $translationService;
public function __construct(TranslationServiceInterface $translationService)
{
$this->translationService = $translationService;
}
/**
* @Field()
*/
public function getName(Product $product, string $language): string
{
return $this->translationService->getProductName($product->getId(), $language);
}
}
```
Let's break down this sample:
```php
/**
* @ExtendType(class=Product::class)
*/
```
With the `@ExtendType` annotation, we tell GraphQLite that we want to add fields in the GraphQL type mapped to
the `Product` PHP class.
```php
class ProductType
{
private $translationService;
public function __construct(TranslationServiceInterface $translationService)
{
$this->translationService = $translationService;
}
// ...
}
```
- The `ProductType` class must be in the types namespace. You configured this namespace when you installed GraphQLite.
- The `ProductType` class is actually a **service**. You can therefore inject dependencies in it (like the `$translationService` in this example)
<div class="alert alert-warning"><strong>Heads up!</strong> The <code>ProductType</code> class must exist in the container of your
application and the container identifier MUST be the fully qualified class name.<br/><br/>
If you are using the Symfony bundle (or a framework with autowiring like Laravel), this
is usually not an issue as the container will automatically create the controller entry if you do not explicitly
declare it.</div>
```php
/**
* @Field()
*/
public function getName(Product $product, string $language): string
{
return $this->translationService->getProductName($product->getId(), $language);
}
```
The `@Field` annotation is used to add the "name" field to the `Product` type.
Take a close look at the signature. The first parameter is the "resolved object" we are working on.
Any additional parameters are used as arguments.
Using the "[Type language](https://graphql.org/learn/schema/#type-language)" notation, we defined a type extension for
the GraphQL "Product" type:
```graphql
extend type Product {
    name(language: String!): String!
}
```
<div class="alert alert-success">Type extension is a very powerful tool. Use it to add fields that needs to be
computed from services not available in the entity.
</div>
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 25 2017 03:49:04).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import "TFileSystemOperationDelegate.h"
// Auto-generated declarations (class-dump output) — no implementations are
// visible here; comments below describe only what the signatures show.
@interface TRenameOperationDelegate : TFileSystemOperationDelegate
{
    struct TString _newName;                      // exposed read-only via -newName
    struct TriStateBool _extensionHiddenState;    // exposed read-only via -extensionHiddenState
    struct TriStateBool _changeExtensionResponse; // NOTE(review): presumably the cached reply to a change-extension prompt — confirm in the implementation
    _Bool _isBulkRename;                          // writable via -setIsBulkRename:
}

// NOTE(review): likely checks a name against filesystem-reserved names — confirm.
+ (_Bool)isReservedName:(const struct TString *)arg1 syntaxHint:(const struct NodeNameSyntaxHint *)arg2;
+ (int)validateNewName:(const struct TString *)arg1 forNode:(const struct TFENode *)arg2 isDisplayName:(_Bool)arg3 allowInteraction:(_Bool)arg4;
// C++-style construct/destruct hooks emitted for the C++ struct ivars above.
- (id).cxx_construct;
- (void).cxx_destruct;
- (int)configureNewName:(struct TString *)arg1 forNode:(const struct TFENode *)arg2 isDisplayName:(_Bool)arg3 allowInteraction:(_Bool)arg4;
// Async-operation callbacks inherited from the delegate protocol/superclass.
- (int)asyncNodeOperation:(id)arg1 subOperationCompleted:(unsigned int)arg2 targetNode:(const struct TFENode *)arg3;
- (int)asyncNodeOperation:(id)arg1 errorNotification:(const struct TOperationMonitor *)arg2 error:(const struct OperationErrorRecord *)arg3 reply:(struct NodeEventReply *)arg4;
// Accessors for the ivars above.
- (void)setIsBulkRename:(_Bool)arg1;
- (const struct TriStateBool *)extensionHiddenState;
- (const struct TString *)newName;
@end
| {
"pile_set_name": "Github"
} |
package org.zstack.sdk;

/**
 * Result payload of a QueryLoadBalancerListener API call (generated ZStack SDK class).
 */
public class QueryLoadBalancerListenerResult {
    // Inventory entries matching the query.
    // NOTE(review): raw List — presumably holds LoadBalancerListenerInventory
    // elements; confirm against the API schema before adding generics.
    public java.util.List inventories;
    public void setInventories(java.util.List inventories) {
        this.inventories = inventories;
    }
    public java.util.List getInventories() {
        return this.inventories;
    }
    // Total number of matching records.
    // NOTE(review): presumably the overall count across pages, which may exceed
    // inventories.size() — confirm against the SDK's paging convention.
    public java.lang.Long total;
    public void setTotal(java.lang.Long total) {
        this.total = total;
    }
    public java.lang.Long getTotal() {
        return this.total;
    }
}
| {
"pile_set_name": "Github"
} |
//------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
//------------------------------------------------------------
namespace System.ServiceModel.Syndication
{
using System;
using System.Collections.Generic;
using System.Text;
using System.Xml;
using System.Xml.Schema;
using System.Xml.Serialization;
static class TextSyndicationContentKindHelper
{
    /// <summary>
    /// Reports whether <paramref name="kind"/> names one of the three
    /// declared content kinds: Plaintext, Html, or XHtml.
    /// </summary>
    public static bool IsDefined(TextSyndicationContentKind kind)
    {
        switch (kind)
        {
            case TextSyndicationContentKind.Plaintext:
            case TextSyndicationContentKind.Html:
            case TextSyndicationContentKind.XHtml:
                return true;
            default:
                return false;
        }
    }
}
}
| {
"pile_set_name": "Github"
} |
Application time: 0.1160820 seconds
2012-04-06T23:42:46.515+0200: 0.188: [GC pause (young)
Desired survivor size 1048576 bytes, new threshold 15 (max 15)
, 0.00784501 secs]
[Parallel Time: 7.5 ms]
[GC Worker Start Time (ms): 187.8 187.8 187.8 187.8
Avg: 187.8, Min: 187.8, Max: 187.8, Diff: 0.0]
[Update RS (ms): 0.0 0.0 0.0 0.0
Avg: 0.0, Min: 0.0, Max: 0.0, Diff: 0.0]
[Processed Buffers : 0 1 5 0
Sum: 6, Avg: 1, Min: 0, Max: 5, Diff: 5]
[Ext Root Scanning (ms): 0.4 0.5 0.5 0.5
Avg: 0.5, Min: 0.4, Max: 0.5, Diff: 0.1]
[Mark Stack Scanning (ms): 0.0 0.0 0.0 0.0
Avg: 0.0, Min: 0.0, Max: 0.0, Diff: 0.0]
[Scan RS (ms): 0.0 0.0 0.0 0.0
Avg: 0.0, Min: 0.0, Max: 0.0, Diff: 0.0]
[Object Copy (ms): 7.0 6.9 6.9 6.9
Avg: 6.9, Min: 6.9, Max: 7.0, Diff: 0.1]
[Termination (ms): 0.0 0.0 0.0 0.1
Avg: 0.0, Min: 0.0, Max: 0.1, Diff: 0.1]
[Termination Attempts : 1 1 1 1
Sum: 4, Avg: 1, Min: 1, Max: 1, Diff: 0]
[GC Worker End Time (ms): 195.2 195.2 195.2 195.3
Avg: 195.3, Min: 195.2, Max: 195.3, Diff: 0.0]
[GC Worker Times (ms): 7.5 7.5 7.5 7.5
Avg: 7.5, Min: 7.5, Max: 7.5, Diff: 0.0]
[Parallel Other: 0.1 ms]
[Clear CT: 0.0 ms]
[Other: 0.3 ms]
[Choose CSet: 0.0 ms]
[Ref Proc: 0.2 ms]
[Ref Enq: 0.0 ms]
[Eden: 10M(10M)->0B(4096K) Survivors: 0B->2048K Heap: 10M(16M)->10096K(20M)]
[Times: user=0.06 sys=0.00, real=0.01 secs]
Total time for which application threads were stopped: 0.0081637 seconds
Application time: 0.0013498 seconds
Total time for which application threads were stopped: 0.0103000 seconds
2012-04-06T23:42:46.734+0200: 0.403: [GC concurrent-mark-end, 0.0024435 sec]
Application time: 0.0022977 seconds
2012-04-06T23:42:46.734+0200: 0.403: [GC remark 0.403: [GC ref-proc, 0.0000070 secs], 0.0005542 secs]
[Times: user=0.01 sys=0.00, real=0.00 secs]
Total time for which application threads were stopped: 0.0007062 seconds
2012-04-06T23:42:46.735+0200: 0.404: [GC concurrent-count-start]
Application time: 0.0009422 seconds
2012-04-06T23:42:46.672+0200: 0.342: [GC pause (partial)
Desired survivor size 524288 bytes, new threshold 1 (max 15)
- age 1: 3048912 bytes, 3048912 total
, 0.02648319 secs]
[Parallel Time: 26.0 ms]
[GC Worker Start Time (ms): 342.6 342.6 342.6 342.6
Avg: 342.6, Min: 342.6, Max: 342.6, Diff: 0.0]
[Update RS (ms): 0.0 0.0 0.0 0.0
Avg: 0.0, Min: 0.0, Max: 0.0, Diff: 0.0]
[Processed Buffers : 0 0 0 4
Sum: 4, Avg: 1, Min: 0, Max: 4, Diff: 4]
[Ext Root Scanning (ms): 0.5 0.3 0.4 0.3
Avg: 0.4, Min: 0.3, Max: 0.5, Diff: 0.2]
[Mark Stack Scanning (ms): 0.0 0.0 0.0 0.0
Avg: 0.0, Min: 0.0, Max: 0.0, Diff: 0.0]
[Scan RS (ms): 0.0 0.0 0.0 0.1
Avg: 0.0, Min: 0.0, Max: 0.1, Diff: 0.1]
[Object Copy (ms): 25.3 25.5 25.5 25.4
Avg: 25.4, Min: 25.3, Max: 25.5, Diff: 0.2]
[Termination (ms): 0.1 0.1 0.0 0.1
Avg: 0.1, Min: 0.0, Max: 0.1, Diff: 0.1]
[Termination Attempts : 1 1 1 1
Sum: 4, Avg: 1, Min: 1, Max: 1, Diff: 0]
[GC Worker End Time (ms): 368.5 368.5 368.5 368.5
Avg: 368.5, Min: 368.5, Max: 368.5, Diff: 0.0]
[GC Worker Times (ms): 26.0 25.9 25.9 25.9
Avg: 25.9, Min: 25.9, Max: 26.0, Diff: 0.0]
[Parallel Other: 0.1 ms]
[Clear CT: 0.0 ms]
[Other: 0.4 ms]
[Choose CSet: 0.0 ms]
[Ref Proc: 0.1 ms]
[Ref Enq: 0.0 ms]
[Eden: 3072K(3072K)->0B(28M) Survivors: 1024K->1024K Heap: 62M(128M)->58M(128M)]
[Times: user=0.06 sys=0.00, real=0.03 secs]
Total time for which application threads were stopped: 0.0267794 seconds
Application time: 0.0072551 seconds
| {
"pile_set_name": "Github"
} |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package org.apertium.transfer;
import java.util.HashSet;
/**
 * An in-memory word list supporting exact and case-insensitive membership
 * tests, plus prefix and suffix matching in both modes.
 *
 * @author Jacob Nordfalk
 */
public class WordList {
  /** Words exactly as supplied to the constructor. */
  private HashSet<String> exact;
  /** Lower-cased copies of the supplied words, for case-insensitive lookups. */
  private HashSet<String> lowercased;

  public WordList(String[] list) {
    // Pre-size both sets so the default load factor (0.75) never forces a rehash.
    int capacity = Math.max((int) (list.length / .75f) + 1, 16);
    exact = new HashSet<String>(capacity);
    lowercased = new HashSet<String>(capacity);
    for (String word : list) {
      exact.add(word);
      lowercased.add(word.toLowerCase());
    }
  }

  /** True if the list contains {@code source}, ignoring case. */
  public boolean containsIgnoreCase(String source) {
    return lowercased.contains(source.toLowerCase());
  }

  /** True if the list contains exactly {@code source}. */
  public boolean contains(String source) {
    return exact.contains(source);
  }

  /** True if any word starts with {@code source}, ignoring case. */
  public boolean containsIgnoreCaseBeginningWith(String source) {
    String prefix = source.toLowerCase();
    for (String word : lowercased) {
      if (word.startsWith(prefix)) {
        return true;
      }
    }
    return false;
  }

  /** True if any word starts with exactly {@code source}. */
  public boolean containsBeginningWith(String source) {
    for (String word : exact) {
      if (word.startsWith(source)) {
        return true;
      }
    }
    return false;
  }

  /** True if any word ends with {@code source}, ignoring case. */
  public boolean containsIgnoreCaseEndingWith(String source) {
    String suffix = source.toLowerCase();
    for (String word : lowercased) {
      if (word.endsWith(suffix)) {
        return true;
      }
    }
    return false;
  }

  /** True if any word ends with exactly {@code source}. */
  public boolean containsEndingWith(String source) {
    for (String word : exact) {
      if (word.endsWith(source)) {
        return true;
      }
    }
    return false;
  }
}
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.