max_stars_count: int64, 301 to 224k
text: stringlengths, 6 to 1.05M
token_count: int64, 3 to 727k
max_stars_count: 3,055
/* Fontname: open_iconic_embedded_8x Copyright: https://github.com/iconic/open-iconic, SIL OPEN FONT LICENSE Glyphs: 17/17 BBX Build Mode: 0 */ const uint8_t u8g2_font_open_iconic_embedded_8x_t[1934] U8G2_FONT_SECTION("u8g2_font_open_iconic_embedded_8x_t") = "\21\0\6\6\7\7\6\5\10@@\0\0@\0@\0\0F\0\0\7q@F@\30\210\201\1\34\202" "\377\20\4\212@\20(\2A\240\10\4\201\42\20\4\212@\20(\2A\240\10\4\201\42\20\4\212\375\377" "\241\10\4\201\42\20\4\212@\20(\2A\240\10\4\201\42\20\4\212@\20(\2\301\377\20\0AU@" " \10\201\71D\355\200%-e\265\244\230SSrH\316\350\21\275B\20\10\376}\42O\254\33u$" "\216\264\231\64S\206\272T\226\252bM\256\10\366\310\32\332bKp\207\256\340\21\274\301'\370\202\37\360" "\17 \374\300\37\10\201\241b\207\354\230\31\265\2\67t\0B\217@ \10\201\71\304\255\260\225j\6\315" "\230\35\344`q(\204\352p@\216\303\244( \205\42\202=\62G\336\3\203`\221KrI-jb" "L(\10\3\241\36\316\203r@\16\351\311\201P\20%bE\12\212E\11*V\12C\37\226\262*\1" "\305\242\20\22K\222@(\210\363\220\36\222\3rP\17\347AA\30\10\65\61&\24krI\256\10\6" "\301\36\231#\357\201E\13\2B,\246\303\1\71\16\25Aq(\4\322\61\63h\246Za+n\4\7" "\0Co\240\37\34\201\31\306\304\240\30\23\203bL\14\212\61\61(\306\304\240\30\23\203bL\14\212\61" "\61(\316\300!\70\3\207\340\14\34\202\63p(\6\265\240\30\324\242ZP\213jQ)\252E\245\260\24" "\225\302RX\11Ka%\230O\340\31\70\304\246\320\34\30\344\222X\26\12\63i$\17\4zD\216\211" "A-*\305\205\300\16\231a#p\203Np\1\0Dh>\134\210\201=\301N\310\15\266bf\304\16" "W\262RR\213\251\31\71\241\307\23m\244\314d\251&V\344rd\13-\261\31\272\201\27\370\16\230\3" "\377\377\34\10\4q \20\304\201@\20\7\2A\34\10\4q \20\304\201@\20\7\2A\34\10\4q" " \20\304\201@\20\7\2A\34\10\4q \20\304\201@\20\7\2A\16\0EW\277\37\10\201_\1" "O\320\15\70\302V\320\14\331\21\77\177\261\343\23D\214\217\20)>C\204\370\20\221\341SD\204\217\21" "\11>\7\14q%,E\265\240\30SCrD\17\370\371\276\377\77\320\23vD\316\270!\65e\306\304" "\234\27\324\222VT\312:a%m\304\205\274\0F\236\300\33x\201\63\201/\324\23x\242\335\240\33\345" "\10\34\331\255\260\25L U\60\1\224\241\14N\206\62\70\31\12\241t \205\322\201\30H\307q \37" "\6B\204\30\211\20R(\302\10\261\370\20\2b`|\10\1\61\64\232\207\200\20\22\302l@\4\63\2" "\21\314\10^\241\63t\306#P\31\17A\242\60\220\306\307@\32\37\3a\204\12e\21*\224E\250P" "\24#\202I\214\10\6A\42\30Dyh\16\245\301\61\230\6\307p\26\36\2J\200\10(\1\42\240\4" "h\240\12$\201*\220\2\253@\12p\0G\214\70 \10\201\61\304\233x\25\355\242e\260\214\325\241:" "\322G\12\71#\247\304\224\224\24\222\62VFKx\11\61 \346\323x\232n\323q\266\5\242\330\26\310" "B[ \213\214\201\60\60\6\302\270\32\10\343j \215\312\201\64*\7\342\240\34\210cz \17\351\201" "<\42\10\2\201 \10\344\211 \220'\202D\34U\303bX\213Kq%\257\4v\304\216\12R\61*" "H\245\260 \26\302\202X\206\13b\31.\310%\300 \227\0\203\374\200\377\3>Hh@ \10\201\321" "C\275\230\231\344\346f\306\334\220\235\342\347`\250+\200A,\1\6\251\6\30\204\42`\220\251\200A$" "\3\6\211\16\331)\63if\231);ag\30\352Bw\235\260\23f\312.Db\273\240\327\377\322\2" "\321H\13\306B% \211\225\200 X\2rh\11\210\301-\30\6\270@\30b\307\314\250\25\270\241\6" "\0I\27@\30\210\201\1\34\202\377\377\220\37\360\377\377\1\11\4\377\377!\0J\261*`\12\201\227B" "i \17c\42T\200\14\257\321\71\70\210\26\311&\27\305B\4\214\12!\64&\4\301X\4\5\242h" " \4\2q\60\14\303\200@\30\4B@(\214@\21 \226\317\302\10$\12c\210 \14\3r\60\220" "G\301X\34\4\243i\10\14\210\21,&K\65\301\42\232\203{h\222\213R]$F\300\200\32\2\303" "q\20\214\345Q\60\22\310\301\60\42\10\203\220(\214`\262X\2\304\302\10\20\2Ba\14\206\1\201\60" 
"\14\2\201\70\30\210\240@\24\15\10A\60$\204\320\230\20\1\243\242X\223+\222A\64\7\327\350\30\236" "\2L\204\207\321@\26\64\0K\237@ \10\201\65\6LQ\61eG\4y\244\315\204\251*K\21\231" ".\310#\302,\215'\303\64\234\214\263h\64\317b\331@\13e\23)\22\216\244\70:S\342\350P\11" "\243C%\12O\225 <\26b\370X\11\341c%\204\317\205\20>\27\42\0])\14\375\357J\4\237" "\13!|.\204\360\261\22\302\307J\14\36\13Ax\252D\321\241\22F\207J\34\234)\201p$E\262" "\211\24\312\6Z,\232g\321d\234\205\223a\30\20fiD\27\344\61Y\212H\225\312L\33\311\3\211" "\34\23\243R`\206\6\0L'\70 \10\201\201\377\377\377\77@\25\330\6\332\17|P\300\376\1\377\7" "\310\210\231\21C\340!\61cF\374\200\377\3\42M^)\340\11\201\67\302x\0\25\35csd\220+" "bI*\351\71\4\42\342\34\4\210\223\345\70\2\205\3\11\20Nd(\70\222J\372\347\20\210\210s\20" " N\226\343\10\24\16$@\70\221\241\340H*\351\252\312t\211\60Q\6\312@\30\351\62U~\340\221" "\215\355\241A\260\250\14\222=\266\206\3\0N\251\70 \10\201\61\4\214\374\257\24$\210D\200\21\22D" "B\250\14\10\2\61L\210\3)R\30\10C\361\60\12D\301h\30\6\302`,\13\4q\60\24E\202" "H\224\24\204\242@\22\13bI\34\11\6\301$\12\5\203`\22E\242A\64\11\2\341 \15\11\7\341" "$\4\304\203$ \36$\1\361 \5\10\10\361DP\350\207F\314D\200\60\21\2\302D\10\10\23!" "$\310\304\200 \21DBL\20\12\60a$\300\204\241\360\22\210\202S$\12M\241\60,\306\302\240\30" "M\3j<\20\4\22\231&R%\302<\333\6\264\220\22Uq\1\0O\222@ \10\201\265E<\251" "\4\24&\32D\35\207\0\201\226\202CB\14\215\311\70,)\2\241D\24\210\220H\36\20\6iT\226" "\203\221Q\216\305\66\61\26^\244P\204\220\42!B\10E\30\31\24\241dH\204\223\1\21R\204\4X" "\21\20\240%H|\227\0\361a\2\4\216\270\23\67\2G\376\341\11\34\31\236\300\21x\2G\340I\267" "\342N\334\212\60P-\274A\265\340\12\24#CP\15KA= \7\25Q\42\225\12;i$/\24" "v\312LJ\201\31\32\0P^< \10\201\257\307\251\361D\330Ib\71\60\205f\340\4\13d\11 " "Z$h\200\215!a&\210\203z(\10.\341\10\310\201\37\370\27,Z\212\211\21\71\36h\23m\240" "\356\323)\24\305g\200\14\340\300\32\204\201\37\370\277\320@+b\306\353p\35-\204\205^\353p\35/" "#V\320\206\15\0\0\0\0\4\377\377\0";
token_count: 3,459
max_stars_count: 1,405
<filename>sample4/recompiled_java/sources/org/apache/commons/httpclient/methods/FileRequestEntity.java<gh_stars>1000+ package org.apache.commons.httpclient.methods; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; public class FileRequestEntity implements RequestEntity { final File a; final String b; public FileRequestEntity(File file, String contentType) { if (file == null) { throw new IllegalArgumentException("File may not be null"); } this.a = file; this.b = contentType; } @Override // org.apache.commons.httpclient.methods.RequestEntity public long getContentLength() { return this.a.length(); } @Override // org.apache.commons.httpclient.methods.RequestEntity public String getContentType() { return this.b; } @Override // org.apache.commons.httpclient.methods.RequestEntity public boolean isRepeatable() { return true; } @Override // org.apache.commons.httpclient.methods.RequestEntity public void writeRequest(OutputStream out) throws IOException { byte[] tmp = new byte[4096]; InputStream instream = new FileInputStream(this.a); while (true) { try { int i = instream.read(tmp); if (i >= 0) { out.write(tmp, 0, i); } else { return; } } finally { instream.close(); } } } }
token_count: 690
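Note on the FileRequestEntity entry above: the decompiled writeRequest places its finally block inside the read loop, so the FileInputStream is closed after the first 4096-byte buffer and any larger file fails on the next read. Below is a minimal sketch of the conventional copy loop plus a typical call site, assuming the standard commons-httpclient 3.x API (the URL and content type are illustrative):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.methods.FileRequestEntity;
import org.apache.commons.httpclient.methods.PostMethod;

class FileUploadSketch {
    // Conventional form of the copy loop: close the stream once, after the loop finishes.
    static void copyFile(File file, OutputStream out) throws IOException {
        byte[] tmp = new byte[4096];
        InputStream in = new FileInputStream(file);
        try {
            int n;
            while ((n = in.read(tmp)) >= 0) {
                out.write(tmp, 0, n);
            }
        } finally {
            in.close();
        }
    }

    // Typical call site: the entity streams the file as the POST body.
    static void upload(File file) throws IOException {
        PostMethod post = new PostMethod("http://example.org/upload"); // hypothetical endpoint
        post.setRequestEntity(new FileRequestEntity(file, "application/octet-stream"));
        new HttpClient().executeMethod(post);
    }
}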
max_stars_count: 8,633
import pytest pytest.importorskip("redis")
token_count: 17
max_stars_count: 416
<reponame>Zi0P4tch0/sdks // // ARSCNPlaneGeometry.h // ARKit // // Copyright © 2016-2020 Apple Inc. All rights reserved. // #import <Foundation/Foundation.h> #import <SceneKit/SCNGeometry.h> @protocol MTLDevice; @class ARPlaneGeometry; NS_ASSUME_NONNULL_BEGIN /** A SceneKit geometry representing a plane. */ API_AVAILABLE(ios(11.3)) @interface ARSCNPlaneGeometry : SCNGeometry /** Creates a new plane geometry using a Metal device. @param device A Metal device. @return A new plane geometry. */ + (nullable instancetype)planeGeometryWithDevice:(id<MTLDevice>)device; /** Updates the geometry with the vertices of a plane geometry. @param planeGeometry A plane geometry. */ - (void)updateFromPlaneGeometry:(ARPlaneGeometry *)planeGeometry; @end NS_ASSUME_NONNULL_END
token_count: 284
max_stars_count: 2,338
//===--- ADRRelaxationPass.cpp --------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // //===----------------------------------------------------------------------===// #include "bolt/Passes/ADRRelaxationPass.h" #include "bolt/Core/ParallelUtilities.h" using namespace llvm; namespace opts { extern cl::OptionCategory BoltCategory; static cl::opt<bool> AdrPassOpt("adr-relaxation", cl::desc("Replace ARM non-local ADR instructions with ADRP"), cl::init(true), cl::cat(BoltCategory), cl::ReallyHidden); } // namespace opts namespace llvm { namespace bolt { void ADRRelaxationPass::runOnFunction(BinaryContext &BC, BinaryFunction &BF) { for (BinaryBasicBlock *BB : BF.layout()) { for (auto It = BB->begin(); It != BB->end(); ++It) { MCInst &Inst = *It; if (!BC.MIB->isADR(Inst)) continue; const MCSymbol *Symbol = BC.MIB->getTargetSymbol(Inst); if (!Symbol) continue; if (BF.hasIslandsInfo()) { BinaryFunction::IslandInfo &Islands = BF.getIslandInfo(); if (Islands.Symbols.count(Symbol) || Islands.ProxySymbols.count(Symbol)) continue; } BinaryFunction *TargetBF = BC.getFunctionForSymbol(Symbol); if (TargetBF && TargetBF == &BF) continue; MCPhysReg Reg; BC.MIB->getADRReg(Inst, Reg); int64_t Addend = BC.MIB->getTargetAddend(Inst); std::vector<MCInst> Addr = BC.MIB->materializeAddress(Symbol, BC.Ctx.get(), Reg, Addend); It = BB->replaceInstruction(It, Addr); } } } void ADRRelaxationPass::runOnFunctions(BinaryContext &BC) { if (!opts::AdrPassOpt || !BC.HasRelocations) return; ParallelUtilities::WorkFuncTy WorkFun = [&](BinaryFunction &BF) { runOnFunction(BC, BF); }; ParallelUtilities::runOnEachFunction( BC, ParallelUtilities::SchedulingPolicy::SP_TRIVIAL, WorkFun, nullptr, "ADRRelaxationPass", /* ForceSequential */ true); } } // end namespace bolt } // end namespace llvm
token_count: 855
max_stars_count: 332
package io.github.quickmsg.core.protocol; import io.github.quickmsg.common.ack.Ack; import io.github.quickmsg.common.channel.MqttChannel; import io.github.quickmsg.common.context.ReceiveContext; import io.github.quickmsg.common.message.SmqttMessage; import io.github.quickmsg.common.protocol.Protocol; import io.netty.handler.codec.mqtt.MqttConnectMessage; import io.netty.handler.codec.mqtt.MqttMessageIdVariableHeader; import io.netty.handler.codec.mqtt.MqttMessageType; import io.netty.handler.codec.mqtt.MqttPubAckMessage; import reactor.core.publisher.Mono; import reactor.util.context.ContextView; import java.util.ArrayList; import java.util.List; import java.util.Optional; /** * @author luxurong */ public class PublishAckProtocol implements Protocol<MqttPubAckMessage> { private static List<MqttMessageType> MESSAGE_TYPE_LIST = new ArrayList<>(); static { MESSAGE_TYPE_LIST.add(MqttMessageType.PUBACK); } @Override public Mono<Void> parseProtocol(SmqttMessage<MqttPubAckMessage> smqttMessage, MqttChannel mqttChannel, ContextView contextView) { return Mono.fromRunnable(()->{ ReceiveContext<?> receiveContext = contextView.get(ReceiveContext.class); MqttPubAckMessage message = smqttMessage.getMessage(); MqttMessageIdVariableHeader idVariableHeader = message.variableHeader(); int messageId = idVariableHeader.messageId(); Optional.ofNullable(receiveContext.getTimeAckManager().getAck(mqttChannel.generateId(MqttMessageType.PUBLISH,messageId))) .ifPresent(Ack::stop); }); } @Override public List<MqttMessageType> getMqttMessageTypes() { return MESSAGE_TYPE_LIST; } }
token_count: 678
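The PublishAckProtocol entry above resolves a pending Ack by message id and stops it when the PUBACK arrives. A stripped-down sketch of that bookkeeping pattern with plain JDK types (the real smqtt Ack and TimeAckManager differ in detail):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

class QoS1AckTracker {
    private final Map<Integer, ScheduledFuture<?>> pending = new ConcurrentHashMap<>();
    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

    // Schedule a retransmit task when a QoS 1 PUBLISH goes out.
    void onPublishSent(int messageId, Runnable retransmit) {
        pending.put(messageId, timer.scheduleAtFixedRate(retransmit, 5, 5, TimeUnit.SECONDS));
    }

    // Cancel the retransmit task when the matching PUBACK arrives (the Ack::stop step above).
    void onPubAck(int messageId) {
        ScheduledFuture<?> task = pending.remove(messageId);
        if (task != null) {
            task.cancel(false);
        }
    }
}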
max_stars_count: 14,668
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.components.content_capture; import org.chromium.components.content_capture.PlatformSession.PlatformSessionData; /** * The task to remove the captured content from the platform. */ class ContentRemovedTask extends NotificationTask { private final long[] mRemovedIds; public ContentRemovedTask( FrameSession session, long[] removedIds, PlatformSession platformSession) { super(session, platformSession); mRemovedIds = removedIds; } @Override protected void runTask() { removeContent(); } private void removeContent() { log("ContentRemovedTask.removeContent"); PlatformSessionData platformSessionData = buildCurrentSession(); if (platformSessionData == null) return; PlatformAPIWrapper.getInstance().notifyViewsDisappeared( platformSessionData.contentCaptureSession, mPlatformSession.getRootPlatformSessionData().autofillId, mRemovedIds); } }
token_count: 381
max_stars_count: 2,296
<reponame>lefty71/open-nsynth-super /* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef __STM32F0xx_HAL_CONF_H #define __STM32F0xx_HAL_CONF_H #ifdef __cplusplus extern "C" { #endif #define HAL_MODULE_ENABLED #define HAL_CORTEX_MODULE_ENABLED #define HAL_FLASH_MODULE_ENABLED #define HAL_GPIO_MODULE_ENABLED #define HAL_PWR_MODULE_ENABLED #define HAL_RCC_MODULE_ENABLED #if !defined (HSE_VALUE) #define HSE_VALUE ((uint32_t)8000000) #endif #if !defined (HSE_STARTUP_TIMEOUT) #define HSE_STARTUP_TIMEOUT ((uint32_t)100) #endif #if !defined (HSI_VALUE) #define HSI_VALUE ((uint32_t)8000000) #endif #if !defined (HSI_STARTUP_TIMEOUT) #define HSI_STARTUP_TIMEOUT ((uint32_t)5000) #endif #if !defined (HSI14_VALUE) #define HSI14_VALUE ((uint32_t)14000000) #endif #if !defined (HSI48_VALUE) #define HSI48_VALUE ((uint32_t)48000000) #endif #if !defined (LSI_VALUE) #define LSI_VALUE ((uint32_t)40000) #endif #if !defined (LSE_VALUE) #define LSE_VALUE ((uint32_t)32768) #endif #if !defined (LSE_STARTUP_TIMEOUT) #define LSE_STARTUP_TIMEOUT ((uint32_t)5000) #endif #define VDD_VALUE ((uint32_t)3300) #define TICK_INT_PRIORITY ((uint32_t)(1<<__NVIC_PRIO_BITS) - 1) #define USE_RTOS 0 #define PREFETCH_ENABLE 1 #define INSTRUCTION_CACHE_ENABLE 0 #define DATA_CACHE_ENABLE 0 #include "stm32f0xx_hal_rcc.h" #include "stm32f0xx_hal_gpio.h" #include "stm32f0xx_hal_dma.h" #include "stm32f0xx_hal_cortex.h" #include "stm32f0xx_hal_adc.h" #include "stm32f0xx_hal_flash.h" #include "stm32f0xx_hal_i2c.h" #include "stm32f0xx_hal_rtc.h" #include "stm32f0xx_hal_tim.h" #ifdef __cplusplus } #endif #define assert_param(expr) ((void)0) #endif // __STM32F0xx_HAL_CONF_H
token_count: 918
max_stars_count: 488
// gb0006.cc // another permissive issue enum State { goodbit }; template <class T> void f(State &e) { e |= goodbit; }
token_count: 49
max_stars_count: 430
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from typing import Dict from pandas import DataFrame, concat from lib.case_line import convert_cases_to_time_series from lib.pipeline import DataSource from lib.time import datetime_isoformat from lib.utils import table_rename, aggregate_admin_level _column_adapter = { "txn_date": "date", "new_case": "new_confirmed", "total_case": "total_confirmed", "new_death": "new_deceased", "total_death": "total_deceased", "new_recovered": "new_recovered", "total_recovered": "total_recovered", "province": "match_string", } class ThailandCountryDataSource(DataSource): def parse_dataframes( self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts ) -> DataFrame: data = table_rename(dataframes[0], _column_adapter, drop=True) # Add key and return data data["key"] = "TH" return data class ThailandProvinceDataSource(DataSource): def parse_dataframes( self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts ) -> DataFrame: data = table_rename(dataframes[0], _column_adapter, drop=True) # Add country code and return data data["country_code"] = "TH" return data class ThailandCasesDataSource(DataSource): def parse_dataframes( self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts ) -> DataFrame: cases = table_rename( dataframes[0], { # "no": "", "age": "age", "sex": "sex", # "nationality": "", # "province_of_isolation": "", # "notification_date": "date", "announce_date": "date_new_confirmed", "province_of_onset": "match_string", # "district_of_onset": "subregion2_name", # "quarantine": "", }, drop=True, remove_regex=r"[^0-9a-z\s]", ) print(cases) # Convert date to ISO format cases["date_new_confirmed"] = cases["date_new_confirmed"].astype(str).str.slice(0, 10) # Translate sex labels; only male, female and unknown are given sex_adapter = lambda x: {"ชาย": "male", "หญิง": "female"}.get(x, "sex_unknown") cases["sex"] = cases["sex"].apply(sex_adapter) # Convert from cases to time-series format data = convert_cases_to_time_series(cases, ["match_string"]) # Aggregate country-level data by adding all provinces country = ( data.drop(columns=["match_string"]).groupby(["date", "age", "sex"]).sum().reset_index() ) country["key"] = "TH" # Drop bogus records from the data data = data[data["match_string"].notna() & (data["match_string"] != "")] return concat([country, data])
token_count: 1,437
max_stars_count: 1,840
/** * Copyright Pravega Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.pravega.cli.admin.dataRecovery; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import io.pravega.cli.admin.CommandArgs; import io.pravega.common.util.ByteArraySegment; import io.pravega.common.util.ImmutableDate; import io.pravega.segmentstore.contracts.AttributeId; import io.pravega.segmentstore.contracts.AttributeUpdate; import io.pravega.segmentstore.contracts.AttributeUpdateCollection; import io.pravega.segmentstore.contracts.AttributeUpdateType; import io.pravega.segmentstore.contracts.SegmentProperties; import io.pravega.segmentstore.contracts.StreamSegmentInformation; import io.pravega.segmentstore.server.containers.ContainerConfig; import io.pravega.segmentstore.server.logs.DataFrameBuilder; import io.pravega.segmentstore.server.logs.DataFrameRecord; import io.pravega.segmentstore.server.logs.DebugRecoveryProcessor; import io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation; import io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation; import io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation; import io.pravega.segmentstore.server.logs.operations.Operation; import io.pravega.segmentstore.server.logs.operations.OperationSerializer; import io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation; import io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation; import io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation; import io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation; import io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation; import io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation; import io.pravega.segmentstore.server.reading.ReadIndexConfig; import io.pravega.segmentstore.storage.DataLogInitializationException; import io.pravega.segmentstore.storage.DebugDurableDataLogWrapper; import io.pravega.segmentstore.storage.DurableDataLog; import io.pravega.segmentstore.storage.DurableDataLogException; import io.pravega.segmentstore.storage.DurableDataLogFactory; import io.pravega.segmentstore.storage.impl.bookkeeper.BookKeeperConfig; import io.pravega.segmentstore.storage.impl.bookkeeper.BookKeeperLogFactory; import io.pravega.segmentstore.storage.impl.bookkeeper.DebugBookKeeperLogWrapper; import lombok.Cleanup; import lombok.Data; import lombok.Getter; import lombok.NonNull; import lombok.val; import org.apache.zookeeper.KeeperException; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; /** * This command provides an administrator with the basic primitives to manipulate a DurableLog with damaged entries. * The workflow of this command is as follows: * 1. Checks if the Original Log is disabled (exit otherwise). * 2. Reads the original damaged DurableLog and creates a backup copy of it for safety reasons. * 3. Validate and buffer all the edits from admin to be done on the original DurableLog data (i.e., skip, delete, * replace operations). All these changes are then written on a Repair Log (i.e., original DurableLog data + admin changes). * 4. With the desired updates written in the Repair Log, the admin can replace the original DurableLog metadata by the * Repair Log's one. This will make the DurableLog for the Segment Container under repair to point to the Repair Log data. * 5. The backed-up data for the originally damaged DurableLog can be reused to create a new Repair Log or discarded if * the Segment Container recovers as expected. */ public class DurableDataLogRepairCommand extends DataRecoveryCommand { private final static Duration TIMEOUT = Duration.ofSeconds(10); /** * Creates a new instance of the DurableLogRepairCommand class. * * @param args The arguments for the command. */ public DurableDataLogRepairCommand(CommandArgs args) { super(args); } @Override public void execute() throws Exception { ensureArgCount(1); int containerId = getIntArg(0); val bkConfig = getCommandArgs().getState().getConfigBuilder() .include(BookKeeperConfig.builder().with(BookKeeperConfig.ZK_ADDRESS, getServiceConfig().getZkURL())) .build().getConfig(BookKeeperConfig::builder); @Cleanup val zkClient = createZKClient(); @Cleanup DurableDataLogFactory dataLogFactory = new BookKeeperLogFactory(bkConfig, zkClient, getCommandArgs().getState().getExecutor()); dataLogFactory.initialize(); // Open the Original Log in read-only mode. @Cleanup val originalDataLog = dataLogFactory.createDebugLogWrapper(containerId); // Check if the Original Log is disabled. if (originalDataLog.fetchMetadata().isEnabled()) { output("Original DurableLog is enabled. Repairs can only be done on disabled logs, exiting."); return; } // Make sure that the reserved id for Backup log is free before making any further progress. boolean createNewBackupLog = true; if (existsBackupLog(dataLogFactory)) { output("We found data in the Backup log, probably from a previous repair operation (or someone else running the same command at the same time). " + "You have three options: 1) Delete existing Backup Log and start a new repair process, " + "2) Keep existing Backup Log and re-use it for the current repair (i.e., skip creating a new Backup Log), " + "3) Quit."); switch (getIntUserInput("Select an option: [1|2|3]")) { case 1: // Delete everything related to the old Backup Log. try (DebugDurableDataLogWrapper backupDataLogDebugLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getBackupLogId())) { backupDataLogDebugLogWrapper.deleteDurableLogMetadata(); } break; case 2: // Keeping existing Backup Log, so not creating a new one. createNewBackupLog = false; break; default: output("Not doing anything with existing Backup Log this time."); return; } } // Create a new Backup Log if there wasn't any or if we removed the existing one. 
if (createNewBackupLog) { createBackupLog(dataLogFactory, containerId, originalDataLog); } int backupLogReadOperations = validateBackupLog(dataLogFactory, containerId, originalDataLog, createNewBackupLog); // Get user input of operations to skip, replace, or delete. List<LogEditOperation> durableLogEdits = getDurableLogEditsFromUser(); // Show the edits to be committed to the original durable log so the user can confirm. output("The following edits will be used to edit the Original Log:"); durableLogEdits.forEach(e -> output(e.toString())); output("Original DurableLog has been backed up correctly. Ready to apply admin-provided changes to the Original Log."); if (!confirmContinue()) { output("Not editing Original DurableLog this time. A Backup Log has been left during the process and you " + "will find it the next time this command gets executed."); return; } // Ensure that the Repair Log is going to start from a clean state. output("Deleting existing medatadata from Repair Log (if any)"); try (val editedLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId())) { editedLogWrapper.deleteDurableLogMetadata(); } catch (DurableDataLogException e) { if (e.getCause() instanceof KeeperException.NoNodeException) { output("Repair Log does not exist, so nothing to delete."); } else { outputError("Error happened while attempting to cleanup Repair Log metadata."); outputException(e); } } // Create a new Repair log to store the result of edits applied to the Original Log and instantiate the processor // that will write the edited contents into the Repair Log. try (DurableDataLog editedDataLog = dataLogFactory.createDurableDataLog(dataLogFactory.getRepairLogId()); EditingLogProcessor logEditState = new EditingLogProcessor(editedDataLog, durableLogEdits, getCommandArgs().getState().getExecutor()); DurableDataLog backupDataLog = dataLogFactory.createDebugLogWrapper(dataLogFactory.getBackupLogId()).asReadOnly()) { editedDataLog.initialize(TIMEOUT); readDurableDataLogWithCustomCallback(logEditState, dataLogFactory.getBackupLogId(), backupDataLog); Preconditions.checkState(!logEditState.isFailed); // After the edition has completed, we need to disable it before the metadata overwrite. editedDataLog.disable(); } catch (Exception ex) { outputError("There have been errors while creating the edited version of the DurableLog."); outputException(ex); throw ex; } // Validate the contents of the newly created Repair Log. int editedDurableLogOperations = validateRepairLog(dataLogFactory, backupLogReadOperations, durableLogEdits); // Overwrite the original DurableLog metadata with the edited DurableLog metadata. try (val editedLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId())) { output("Original DurableLog Metadata: " + originalDataLog.fetchMetadata()); output("Edited DurableLog Metadata: " + editedLogWrapper.fetchMetadata()); originalDataLog.forceMetadataOverWrite(editedLogWrapper.fetchMetadata()); output("New Original DurableLog Metadata (after replacement): " + originalDataLog.fetchMetadata()); } // Read the edited contents that are now reachable from the original log id. 
try (val editedLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId())) { int finalEditedLogReadOps = readDurableDataLogWithCustomCallback((op, list) -> output("Original Log Operations after repair: " + op), containerId, editedLogWrapper.asReadOnly()); output("Original DurableLog operations read (after editing): " + finalEditedLogReadOps); Preconditions.checkState(editedDurableLogOperations == finalEditedLogReadOps, "Repair Log operations not matching before (" + editedDurableLogOperations + ") and after the metadata overwrite (" + finalEditedLogReadOps + ")"); } catch (Exception ex) { outputError("Problem reading Original DurableLog after editing."); outputException(ex); } output("Process completed successfully! (You still need to enable the Durable Log so Pravega can use it)"); } private int validateRepairLog(DurableDataLogFactory dataLogFactory, int backupLogReadOperations, List<LogEditOperation> durableLogEdits) throws Exception { @Cleanup DurableDataLog editedDebugDataLogReadOnly = dataLogFactory.createDebugLogWrapper(dataLogFactory.getRepairLogId()).asReadOnly(); int editedDurableLogOperations = readDurableDataLogWithCustomCallback((op, list) -> output("Repair Log Operations: " + op), dataLogFactory.getRepairLogId(), editedDebugDataLogReadOnly); output("Edited DurableLog Operations read: " + editedDurableLogOperations); long expectedEditedLogOperations = backupLogReadOperations + durableLogEdits.stream().filter(edit -> edit.type.equals(LogEditType.ADD_OPERATION)).count() - durableLogEdits.stream().filter(edit -> edit.type.equals(LogEditType.DELETE_OPERATION)) .map(edit -> edit.finalOperationId - edit.initialOperationId).reduce(Long::sum).orElse(0L); Preconditions.checkState( expectedEditedLogOperations == editedDurableLogOperations, "Expected (" + expectedEditedLogOperations + ") and actual (" + editedDurableLogOperations + ") operations in Edited Log do not match"); return editedDurableLogOperations; } private int validateBackupLog(DurableDataLogFactory dataLogFactory, int containerId, DebugDurableDataLogWrapper originalDataLog, boolean createNewBackupLog) throws Exception { // Validate that the Original and Backup logs have the same number of operations. int operationsReadFromOriginalLog = readDurableDataLogWithCustomCallback((a, b) -> { }, containerId, originalDataLog.asReadOnly()); @Cleanup val validationBackupDataLog = dataLogFactory.createDebugLogWrapper(dataLogFactory.getBackupLogId()); @Cleanup val validationBackupDataLogReadOnly = validationBackupDataLog.asReadOnly(); int backupLogReadOperations = readDurableDataLogWithCustomCallback((a, b) -> output("Reading: " + a), dataLogFactory.getBackupLogId(), validationBackupDataLogReadOnly); output("Original DurableLog operations read: " + operationsReadFromOriginalLog + ", Backup DurableLog operations read: " + backupLogReadOperations); // Ensure that the Original log contains the same number of Operations than the Backup log upon a new log read. Preconditions.checkState(!createNewBackupLog || operationsReadFromOriginalLog == backupLogReadOperations, "Operations read from Backup Log (" + backupLogReadOperations + ") differ from Original Log ones (" + operationsReadFromOriginalLog + ") "); return backupLogReadOperations; } /** * Returns whether it exists a {@link DurableDataLog} with the reserved id for Backup Log (BACKUP_LOG_ID). * * @param dataLogFactory Factory to instantiate {@link DurableDataLog} instances. * @return Whether there is metadata for an existing Backup Log. 
* @throws DataLogInitializationException If there is an error initializing the {@link DurableDataLog}. */ private boolean existsBackupLog(DurableDataLogFactory dataLogFactory) throws Exception { try (DebugDurableDataLogWrapper backupDataLogDebugLogWrapper = dataLogFactory.createDebugLogWrapper(dataLogFactory.getBackupLogId())) { return backupDataLogDebugLogWrapper.fetchMetadata() != null; } } /** * Copies the contents of the input containerId and {@link DebugBookKeeperLogWrapper} on a new log with id BACKUP_LOG_ID. * * @param dataLogFactory Factory to instantiate {@link DurableDataLog} instances. * @param containerId Container id for the source log. * @param originalDataLog Source log. * @throws Exception If there is an error during backing up process. */ private void createBackupLog(DurableDataLogFactory dataLogFactory, int containerId, DebugDurableDataLogWrapper originalDataLog) throws Exception { // Create a new Backup Log to store the Original Log contents. @Cleanup DurableDataLog backupDataLog = dataLogFactory.createDurableDataLog(dataLogFactory.getBackupLogId()); backupDataLog.initialize(TIMEOUT); // Instantiate the processor that will back up the Original Log contents into the Backup Log. int operationsReadFromOriginalLog; try (BackupLogProcessor backupLogProcessor = new BackupLogProcessor(backupDataLog, getCommandArgs().getState().getExecutor()); DurableDataLog originalDataLogReadOnly = originalDataLog.asReadOnly()) { operationsReadFromOriginalLog = readDurableDataLogWithCustomCallback(backupLogProcessor, containerId, originalDataLogReadOnly); // The number of processed operation should match the number of read operations from DebugRecoveryProcessor. checkBackupLogAssertions(backupLogProcessor.getBeforeCommit().get(), backupLogProcessor.getCommitSuccess().get(), operationsReadFromOriginalLog, backupLogProcessor.isFailed); } catch (Exception e) { outputError("There have been errors while creating the Backup Log."); throw e; } } /** * Performs some basic correctness checks on a backup log. * * @param beforeCommitCalls Number of executions of beforeCommit callbacks during the processing. * @param commitSuccessCalls Number of executions of commitSuccess callbacks during the processing. * @param operationsReadFromOriginalLog Number of {@link Operation}s read from the Original Log. * @param isFailed Whether the {@link BackupLogProcessor} has found any failure during processing. */ @VisibleForTesting void checkBackupLogAssertions(long beforeCommitCalls, long commitSuccessCalls, long operationsReadFromOriginalLog, boolean isFailed) { Preconditions.checkState(beforeCommitCalls == commitSuccessCalls, "BackupLogProcessor has different number of processed (" + beforeCommitCalls + ") and successful operations (" + commitSuccessCalls + ")"); Preconditions.checkState(commitSuccessCalls == operationsReadFromOriginalLog, "BackupLogProcessor successful operations (" + commitSuccessCalls + ") differs from Original Log operations (" + operationsReadFromOriginalLog + ")"); Preconditions.checkState(!isFailed, "BackupLogProcessor has failed"); } /** * Verifies that the list of {@link LogEditOperation} is correct and complies with a set of rules. * * @param durableLogEdits List of {@link LogEditOperation} to check. 
*/ @VisibleForTesting void checkDurableLogEdits(List<LogEditOperation> durableLogEdits) { long previousInitialId = Long.MIN_VALUE; long previousFinalId = Long.MIN_VALUE; LogEditType previousEditType = null; for (LogEditOperation logEditOperation: durableLogEdits) { // All LogEditOperations should target sequence numbers larger than 0. Preconditions.checkState(logEditOperation.getInitialOperationId() > 0); // For delete edits, the last id should be strictly larger than the initial id. Preconditions.checkState(logEditOperation.getType() != LogEditType.DELETE_OPERATION || logEditOperation.getInitialOperationId() < logEditOperation.getFinalOperationId()); // Next operation should start at a higher sequence number (i.e., we cannot have and "add" and a "replace" // edits for the same sequence number). The only exception are consecutive add edits. if (previousEditType != null) { boolean consecutiveAddEdits = previousEditType == LogEditType.ADD_OPERATION && logEditOperation.getType() == LogEditType.ADD_OPERATION; Preconditions.checkState(consecutiveAddEdits || logEditOperation.getInitialOperationId() > previousInitialId); // If the previous Edit Operation was Delete, then the next Operation initial sequence number should be > than // the final sequence number of the Delete operation. Preconditions.checkState(previousEditType != LogEditType.DELETE_OPERATION || logEditOperation.getInitialOperationId() >= previousFinalId); } // Check that Add Edit Operations have non-null payloads. Preconditions.checkState(logEditOperation.getType() == LogEditType.DELETE_OPERATION || logEditOperation.getNewOperation() != null); previousEditType = logEditOperation.getType(); previousInitialId = logEditOperation.getInitialOperationId(); previousFinalId = logEditOperation.getFinalOperationId(); } } /** * Guides the users to a set of options for creating {@link LogEditOperation}s that will eventually modify the * contents of the Original Log. * * @return List of {@link LogEditOperation}s. */ @VisibleForTesting List<LogEditOperation> getDurableLogEditsFromUser() { List<LogEditOperation> durableLogEdits = new ArrayList<>(); boolean finishInputCommands = false; while (!finishInputCommands) { try { final String operationTpe = getStringUserInput("Select edit action on DurableLog: [delete|add|replace]"); switch (operationTpe) { case "delete": long initialOpId = getLongUserInput("Initial operation id to delete? (inclusive)"); long finalOpId = getLongUserInput("Final operation id to delete? (exclusive)"); durableLogEdits.add(new LogEditOperation(LogEditType.DELETE_OPERATION, initialOpId, finalOpId, null)); break; case "add": initialOpId = getLongUserInput("At which Operation sequence number would you like to add new Operations?"); do { durableLogEdits.add(new LogEditOperation(LogEditType.ADD_OPERATION, initialOpId, initialOpId, createUserDefinedOperation())); output("You can add more Operations at this sequence number or not."); } while (confirmContinue()); break; case "replace": initialOpId = getLongUserInput("What Operation sequence number would you like to replace?"); durableLogEdits.add(new LogEditOperation(LogEditType.REPLACE_OPERATION, initialOpId, initialOpId, createUserDefinedOperation())); break; default: output("Invalid operation, please select one of [delete|add|replace]"); } checkDurableLogEdits(durableLogEdits); } catch (NumberFormatException ex) { outputError("Wrong input argument."); outputException(ex); } catch (IllegalStateException ex) { // Last input was incorrect, so remove it. 
output("Last Log Edit Operation did not pass the checks, removing it from list of edits."); durableLogEdits.remove(durableLogEdits.size() - 1); } catch (Exception ex) { outputError("Some problem has happened."); outputException(ex); } output("You can continue adding edits to the original DurableLog."); finishInputCommands = !confirmContinue(); } return durableLogEdits; } /** * Guides the user to generate a new {@link Operation} that will eventually modify the Original Log. * * @return New {@link Operation} to be added in the Original Log. */ @VisibleForTesting Operation createUserDefinedOperation() { Operation result; final String operations = "[DeleteSegmentOperation|MergeSegmentOperation|MetadataCheckpointOperation|" + "StorageMetadataCheckpointOperation|StreamSegmentAppendOperation|StreamSegmentMapOperation|" + "StreamSegmentSealOperation|StreamSegmentTruncateOperation|UpdateAttributesOperation]"; switch (getStringUserInput("Type one of the following Operations to instantiate: " + operations)) { case "DeleteSegmentOperation": long segmentId = getLongUserInput("Input Segment Id for DeleteSegmentOperation:"); result = new DeleteSegmentOperation(segmentId); long offset = getLongUserInput("Input Segment Offset for DeleteSegmentOperation:"); ((DeleteSegmentOperation) result).setStreamSegmentOffset(offset); break; case "MergeSegmentOperation": long targetSegmentId = getLongUserInput("Input Target Segment Id for MergeSegmentOperation:"); long sourceSegmentId = getLongUserInput("Input Source Segment Id for MergeSegmentOperation:"); result = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, createAttributeUpdateCollection()); offset = getLongUserInput("Input Segment Offset for MergeSegmentOperation:"); ((MergeSegmentOperation) result).setStreamSegmentOffset(offset); break; case "MetadataCheckpointOperation": result = new MetadataCheckpointOperation(); ((MetadataCheckpointOperation) result).setContents(createOperationContents()); break; case "StorageMetadataCheckpointOperation": result = new StorageMetadataCheckpointOperation(); ((StorageMetadataCheckpointOperation) result).setContents(createOperationContents()); break; case "StreamSegmentAppendOperation": segmentId = getLongUserInput("Input Segment Id for StreamSegmentAppendOperation:"); offset = getLongUserInput("Input Segment Offset for StreamSegmentAppendOperation:"); result = new StreamSegmentAppendOperation(segmentId, offset, createOperationContents(), createAttributeUpdateCollection()); break; case "StreamSegmentMapOperation": result = new StreamSegmentMapOperation(createSegmentProperties()); break; case "StreamSegmentSealOperation": segmentId = getLongUserInput("Input Segment Id for StreamSegmentSealOperation:"); result = new StreamSegmentSealOperation(segmentId); offset = getLongUserInput("Input Segment Offset for StreamSegmentSealOperation:"); ((StreamSegmentSealOperation) result).setStreamSegmentOffset(offset); break; case "StreamSegmentTruncateOperation": segmentId = getLongUserInput("Input Segment Id for StreamSegmentTruncateOperation:"); offset = getLongUserInput("Input Offset for StreamSegmentTruncateOperation:"); result = new StreamSegmentTruncateOperation(segmentId, offset); break; case "UpdateAttributesOperation": segmentId = getLongUserInput("Input Segment Id for UpdateAttributesOperation:"); result = new UpdateAttributesOperation(segmentId, createAttributeUpdateCollection()); break; default: output("Invalid operation, please select one of " + operations); throw new UnsupportedOperationException(); } return result; } 
/** * Provides two ways of creating the payload of {@link Operation}s with binary content (MetadataCheckpointOperation, * StorageMetadataCheckpointOperation, StreamSegmentAppendOperation): i) zero, which means to provide a content of * a defined length consisting of just 0s, ii) file, which will read the contents of a specified file and use it as * payload for the {@link Operation}. * * @return Binary contents for the {@link Operation}. */ @VisibleForTesting ByteArraySegment createOperationContents() { ByteArraySegment content = null; do { try { switch (getStringUserInput("You are about to create the content for the new Operation. " + "The available options are i) generating 0s as payload (zero), " + "ii) load the contents from a provided file (file), iii) quit: [zero|file|quit]")) { case "zero": int contentLength = getIntUserInput("Input length of the Operation content: "); content = new ByteArraySegment(new byte[contentLength]); break; case "file": String path = getStringUserInput("Input the path for the file to use as Operation content:"); content = new ByteArraySegment(Files.readAllBytes(Path.of(path))); break; case "quit": throw new AbortedUserOperation(); default: output("Wrong option. Please, select one of the following options: [zero|file]"); } } catch (AbortedUserOperation ex) { output("Content generation operation aborted by user."); throw ex; } catch (Exception ex) { outputError("Some problem has happened."); outputException(ex); } } while (content == null); return content; } /** * Method to create a {@link SegmentProperties} object to fill a new {@link StreamSegmentMapOperation}. * * @return New {@link SegmentProperties} object with user-defined content. */ @VisibleForTesting SegmentProperties createSegmentProperties() { String segmentName = getStringUserInput("Input the name of the Segment: "); long offset = getLongUserInput("Input the offset of the Segment: "); long length = getLongUserInput("Input the length of the Segment: "); long storageLength = getLongUserInput("Input the storage length of the Segment: "); boolean sealed = getBooleanUserInput("Is the Segment sealed? [true/false]: "); boolean sealedInStorage = getBooleanUserInput("Is the Segment sealed in storage? [true/false]: "); boolean deleted = getBooleanUserInput("Is the Segment deleted? [true/false]: "); boolean deletedInStorage = getBooleanUserInput("Is the Segment deleted in storage? 
[true/false]: "); output("You are about to start adding Attributes to the SegmentProperties instance."); boolean finishInputCommands = !confirmContinue(); Map<AttributeId, Long> attributes = new HashMap<>(); while (!finishInputCommands) { output("Creating an AttributeUpdateCollection for this operation."); try { AttributeId attributeId = AttributeId.fromUUID(UUID.fromString(getStringUserInput("Input UUID for this Attribute: "))); long value = getLongUserInput("Input the Value for this Attribute:"); attributes.put(attributeId, value); } catch (NumberFormatException ex) { outputError("Wrong input argument."); outputException(ex); } catch (Exception ex) { outputError("Some problem has happened."); outputException(ex); } output("You can continue adding AttributeUpdates to the AttributeUpdateCollection."); finishInputCommands = !confirmContinue(); } long lastModified = getLongUserInput("Input last modified timestamp for the Segment (milliseconds): "); return StreamSegmentInformation.builder().name(segmentName).startOffset(offset).length(length).storageLength(storageLength) .sealed(sealed).deleted(deleted).sealedInStorage(sealedInStorage).deletedInStorage(deletedInStorage) .attributes(attributes).lastModified(new ImmutableDate(lastModified)).build(); } /** * Method to create a {@link AttributeUpdateCollection} object to fill the {@link Operation}s that require it. * * @return New {@link AttributeUpdateCollection} object with user-defined content. */ @VisibleForTesting AttributeUpdateCollection createAttributeUpdateCollection() { AttributeUpdateCollection attributeUpdates = new AttributeUpdateCollection(); output("You are about to start adding AttributeUpdates to the AttributeUpdateCollection."); boolean finishInputCommands = !confirmContinue(); while (!finishInputCommands) { output("Creating an AttributeUpdateCollection for this operation."); try { AttributeId attributeId = AttributeId.fromUUID(UUID.fromString(getStringUserInput("Input UUID for this AttributeUpdate: "))); AttributeUpdateType type = AttributeUpdateType.get((byte) getIntUserInput("Input AttributeUpdateType for this AttributeUpdate" + "(0 (None), 1 (Replace), 2 (ReplaceIfGreater), 3 (Accumulate), 4(ReplaceIfEquals)): ")); long value = getLongUserInput("Input the Value for this AttributeUpdate:"); long comparisonValue = getLongUserInput("Input the comparison Value for this AttributeUpdate:"); attributeUpdates.add(new AttributeUpdate(attributeId, type, value, comparisonValue)); } catch (NumberFormatException ex) { outputError("Wrong input argument."); outputException(ex); } catch (Exception ex) { outputError("Some problem has happened."); outputException(ex); } output("You can continue adding AttributeUpdates to the AttributeUpdateCollection."); finishInputCommands = !confirmContinue(); } return attributeUpdates; } /** * Reads a {@link DurableDataLog} associated with a container id and runs the callback on each {@link Operation} * read from the log. * * @param callback Callback to be run upon each {@link Operation} read. * @param containerId Container id to read from. * @param durableDataLog {@link DurableDataLog} of the Container to be read. * @return Number of {@link Operation}s read. * @throws Exception If there is a problem reading the {@link DurableDataLog}. 
*/ @VisibleForTesting int readDurableDataLogWithCustomCallback(BiConsumer<Operation, List<DataFrameRecord.EntryInfo>> callback, int containerId, DurableDataLog durableDataLog) throws Exception { val logReaderCallbacks = new DebugRecoveryProcessor.OperationCallbacks( callback, op -> false, // We are not interested on doing actual recovery, just reading the operations. null, null); val containerConfig = getCommandArgs().getState().getConfigBuilder().build().getConfig(ContainerConfig::builder); val readIndexConfig = getCommandArgs().getState().getConfigBuilder().build().getConfig(ReadIndexConfig::builder); @Cleanup val rp = DebugRecoveryProcessor.create(containerId, durableDataLog, containerConfig, readIndexConfig, getCommandArgs().getState().getExecutor(), logReaderCallbacks); int operationsRead = rp.performRecovery(); output("Number of operations read from DurableLog: " + operationsRead); return operationsRead; } /** * This class provides the basic logic for reading from a {@link DurableDataLog} and writing the read {@link Operation}s * to another {@link DurableDataLog}. Internally, it uses a {@link DataFrameBuilder} to write to the target {@link DurableDataLog} * and performs one write at a time, waiting for the previous write to complete before issuing the next one. It also * provides counters to inform about the state of the processing as well as closing the resources. */ abstract class AbstractLogProcessor implements BiConsumer<Operation, List<DataFrameRecord.EntryInfo>>, AutoCloseable { protected final Map<Long, CompletableFuture<Void>> operationProcessingTracker = new ConcurrentHashMap<>(); @NonNull protected final DataFrameBuilder<Operation> dataFrameBuilder; @Getter protected boolean isFailed = false; @Getter private final AtomicBoolean closed = new AtomicBoolean(); @Getter private final AtomicInteger beforeCommit = new AtomicInteger(); @Getter private final AtomicInteger commitSuccess = new AtomicInteger(); @Getter private final AtomicInteger commitFailure = new AtomicInteger(); private final AtomicLong sequenceNumber = new AtomicLong(Long.MIN_VALUE); AbstractLogProcessor(DurableDataLog durableDataLog, ScheduledExecutorService executor) { DataFrameBuilder.Args args = new DataFrameBuilder.Args( a -> this.beforeCommit.getAndIncrement(), b -> { this.operationProcessingTracker.get(b.getLastFullySerializedSequenceNumber()).complete(null); this.commitSuccess.getAndIncrement(); }, (c, d) -> { this.operationProcessingTracker.get(d.getLastFullySerializedSequenceNumber()).complete(null); this.isFailed = true; // Consider a single failed write as a failure in the whole process. this.commitFailure.getAndIncrement(); }, executor); this.dataFrameBuilder = new DataFrameBuilder<>(durableDataLog, OperationSerializer.DEFAULT, args); } @Override public void close() { if (closed.compareAndSet(false, true)) { this.dataFrameBuilder.flush(); this.dataFrameBuilder.close(); this.operationProcessingTracker.clear(); } } /** * Writes an {@link Operation} to the {@link DataFrameBuilder} and wait for the {@link DataFrameBuilder.Args} * callbacks are invoked after the operation is written to the target {@link DurableDataLog}. * * @param operation {@link Operation} to be written and completed before continue with further writes. * @throws IOException If there is a problem writing the {@link Operation} to the target {@link DurableDataLog}. 
*/ protected void writeAndConfirm(Operation operation) throws IOException { sequenceNumber.compareAndSet(Long.MIN_VALUE, operation.getSequenceNumber()); // We only consider writing operations with sequence number higher than the expected one. if (operation.getSequenceNumber() >= sequenceNumber.get()) { trackOperation(operation); this.dataFrameBuilder.append(operation); this.dataFrameBuilder.flush(); waitForOperationCommit(operation); sequenceNumber.incrementAndGet(); } else { outputError("Skipping (i.e., not writing) Operation with wrong Sequence Number: " + operation); } } private void trackOperation(Operation operation) { this.operationProcessingTracker.put(operation.getSequenceNumber(), new CompletableFuture<>()); } private void waitForOperationCommit(Operation operation) { this.operationProcessingTracker.get(operation.getSequenceNumber()).join(); this.operationProcessingTracker.remove(operation.getSequenceNumber()); } } /** * Writes all the {@link Operation}s passed in the callback to the target {@link DurableDataLog}. */ class BackupLogProcessor extends AbstractLogProcessor { BackupLogProcessor(DurableDataLog backupDataLog, ScheduledExecutorService executor) { super(backupDataLog, executor); } @Override public void accept(Operation operation, List<DataFrameRecord.EntryInfo> frameEntries) { try { output("Backing up: " + operation); writeAndConfirm(operation); } catch (Exception e) { outputError("Error writing Operation to Backup Log " + operation); outputException(e); isFailed = true; } } } /** * Given a list of sorted {@link LogEditOperation}, writes to the target {@link DurableDataLog} the {@link Operation}s * passed in the callback plus the result of applying the {@link LogEditOperation}. */ class EditingLogProcessor extends AbstractLogProcessor { private final List<LogEditOperation> durableLogEdits; private long newSequenceNumber = 1; // Operation sequence numbers start by 1. private int editIndex = 0; EditingLogProcessor(DurableDataLog editedDataLog, @NonNull List<LogEditOperation> durableLogEdits, ScheduledExecutorService executor) { super(editedDataLog, executor); this.durableLogEdits = durableLogEdits; } @Override public void accept(Operation operation, List<DataFrameRecord.EntryInfo> frameEntries) { try { // Nothing to edit, just write the Operations from the original log to the edited one. if (!hasEditsToApply(operation)) { operation.resetSequenceNumber(newSequenceNumber++); writeAndConfirm(operation); } else { // Edits to a DurableLog are sorted by their initial operation id and they are removed once they // have been applied. The only case in which we can find a DurableLog Operation with a sequence // number lower than the next DurableLog edit is that the data corruption issue we are trying to // repair induces duplication of DataFrames. if (checkDuplicateOperation(operation)) { // Skip processing of this operation. return; } // We have edits to do. LogEditOperation logEdit = this.durableLogEdits.get(this.editIndex); switch (logEdit.getType()) { case DELETE_OPERATION: // A Delete Edit Operation on a DurableLog consists of not writing the range of Operations // between its initial (inclusive) and final (exclusive) operation id. 
// Example: Original Log = [1, 2, 3, 4, 5] // delete 2-4 // Result Log = [1, 2 (4), 3 (5)] (former operation sequence number) applyDeleteEditOperation(operation, logEdit); break; case ADD_OPERATION: // An Add Edit Operation on a DurableLog consists of appending the desired Operation // encapsulated in the Add Edit Operation before the actual Operation contained in the log. // Note that we may want to add multiple new Operations at a specific position before the // actual one. // Example: Original Log = [1, 2, 3, 4, 5] // add 2, opA, opB, opC // Result Log = [1, 2 (opA), 3 (opB), 4 (opC), 5 (2), 6 (3), 7 (4), 8 (5)] (former operation sequence number) long currentInitialAddOperationId = logEdit.getInitialOperationId(); do { applyAddEditOperation(logEdit); output("Completed Add Edit Operation on DurableLog: " + logEdit); if (this.editIndex >= this.durableLogEdits.size()) { // Last Add Edit Operation was the last thing to do. break; } logEdit = this.durableLogEdits.get(this.editIndex); // Only continue if we have consecutive Add Edit Operations that refer to the same sequence number. } while (logEdit.getType().equals(LogEditType.ADD_OPERATION) && logEdit.getInitialOperationId() == currentInitialAddOperationId); // After all the additions are done, add the current log operation. operation.resetSequenceNumber(newSequenceNumber++); writeAndConfirm(operation); break; case REPLACE_OPERATION: // A Replace Edit Operation on a DurableLog consists of deleting the current Operation and // adding the new Operation encapsulated in the Replace Edit Operation. // Example: Original Log = [1, 2, 3, 4, 5] // replace 2, opA // Result Log = [1, 2 (opA), 3, 4, 5] (former operation sequence number) applyAddEditOperation(logEdit); output("Completed Replace Edit Operation on DurableLog: " + logEdit); break; default: outputError("Unknown DurableLog edit type, deleting edit operation: " + durableLogEdits.get(0).getType()); durableLogEdits.remove(0); } } } catch (Exception e) { outputError("Error serializing operation " + operation); outputException(e); isFailed = true; } } /** * Checks whether the current {@link Operation} to edit is a duplicate. * * @param operation {@link Operation} to check. * @return Whether the current {@link Operation} to edit is a duplicate or not. */ @VisibleForTesting boolean checkDuplicateOperation(Operation operation) { if (operation.getSequenceNumber() < durableLogEdits.get(this.editIndex).getInitialOperationId()) { outputError("Found an Operation with a lower sequence number than the initial" + "id of the next edit to apply. This may be symptom of a duplicated DataFrame and will" + "also duplicate the associated edit: " + operation); return true; } return false; } /** * Adds an {@link Operation}s to the target log and increments the edit index. * * @param logEdit @{@link LogEditOperation} of type {@link LogEditType#ADD_OPERATION} to be added to the target log. * @throws IOException If there is an error applying the "add" {@link LogEditOperation}. */ private void applyAddEditOperation(LogEditOperation logEdit) throws IOException { logEdit.getNewOperation().setSequenceNumber(newSequenceNumber++); writeAndConfirm(logEdit.getNewOperation()); this.editIndex++; } /** * Skips all the {@link Operation} from the original logs encompassed between the {@link LogEditOperation} * initial (inclusive) and final (exclusive) ids. When the last applicable delete has been applied, the edit * index is increased. * * @param operation {@link Operation} read from the log. 
* @param logEdit @{@link LogEditOperation} of type {@link LogEditType#DELETE_OPERATION} that defines the sequence * numbers of the {@link Operation}s to do not write to the target log. */ private void applyDeleteEditOperation(Operation operation, LogEditOperation logEdit) { output("Deleting operation from DurableLog: " + operation); if (logEdit.getFinalOperationId() == operation.getSequenceNumber() + 1) { // Once reached the end of the Delete Edit Operation range, go for the next edit. this.editIndex++; output("Completed Delete Edit Operation on DurableLog: " + logEdit); } } /** * Decides whether there are edits to apply on the log for the specific sequence id of the input {@link Operation}. * * @param op {@link Operation} to check if there are edits to apply. * @return Whether there are edits to apply to the log at the specific position of the input {@link Operation}. */ private boolean hasEditsToApply(Operation op) { if (this.editIndex == this.durableLogEdits.size()) { return false; } LogEditType editType = this.durableLogEdits.get(this.editIndex).getType(); long editInitialOpId = this.durableLogEdits.get(this.editIndex).getInitialOperationId(); long editFinalOpId = this.durableLogEdits.get(this.editIndex).getFinalOperationId(); return editInitialOpId == op.getSequenceNumber() || (editType.equals(LogEditType.DELETE_OPERATION) && editInitialOpId <= op.getSequenceNumber() && editFinalOpId >= op.getSequenceNumber()); } } /** * Available types of editing operations we can perform on a {@link DurableDataLog}. */ enum LogEditType { DELETE_OPERATION, ADD_OPERATION, REPLACE_OPERATION } /** * Information encapsulated by each edit to the target log. */ @Data static class LogEditOperation { private final LogEditType type; private final long initialOperationId; private final long finalOperationId; private final Operation newOperation; @Override public boolean equals(Object objToCompare) { if (!(objToCompare instanceof LogEditOperation)) { return false; } if (objToCompare == this) { return true; } LogEditOperation opToCompare = (LogEditOperation) objToCompare; return this.type == opToCompare.getType() && this.initialOperationId == opToCompare.getInitialOperationId() && this.finalOperationId == opToCompare.getFinalOperationId() && (this.type == LogEditType.DELETE_OPERATION || compareOperations(opToCompare.getNewOperation())); } private boolean compareOperations(Operation newOperationToCompare) { if (this.newOperation == newOperationToCompare) { return true; } if (this.newOperation == null || newOperationToCompare == null) { return false; } // Compare the main parts of an Operation for considering it equal. return this.newOperation.getSequenceNumber() == newOperationToCompare.getSequenceNumber() && this.newOperation.getType() == newOperationToCompare.getType(); } @Override public int hashCode() { return Long.hashCode(initialOperationId) + Long.hashCode(finalOperationId) + (type == null ? 0 : type.hashCode()); } } static class AbortedUserOperation extends RuntimeException { } public static CommandDescriptor descriptor() { return new CommandDescriptor(COMPONENT, "durableLog-repair", "Allows to repair DurableLog " + "damaged/corrupted Operations.", new ArgDescriptor("container-id", "Id of the Container to repair.")); } }
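For illustration only, a minimal sketch (not part of the original command) of how a caller might assemble the sorted edit list that EditingLogProcessor consumes. It relies only on what the file above shows: LogEditOperation's @Data annotation generates a constructor over its final fields (type, initialOperationId, finalOperationId, newOperation), delete ranges are initial-inclusive/final-exclusive, and hasEditsToApply only consults the final id for DELETE edits, so it is left equal to the initial id for the replace. The replacement Operation and the surrounding package/imports are assumed.

import java.util.Arrays;
import java.util.List;

public class DurableLogRepairEditsSketch {
    // Builds edits that delete operations 2 and 3 (range [2, 4)) and replace operation 7.
    static List<LogEditOperation> buildEdits(Operation replacementOp) {
        return Arrays.asList(
                new LogEditOperation(LogEditType.DELETE_OPERATION, 2L, 4L, null),   // no new Operation for deletes
                new LogEditOperation(LogEditType.REPLACE_OPERATION, 7L, 7L, replacementOp));
    }
}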
20,529
4,054
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.athenz.identityprovider.client; import com.yahoo.security.SignatureUtils; import com.yahoo.vespa.athenz.api.AthenzService; import com.yahoo.vespa.athenz.identityprovider.api.IdentityType; import com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument; import com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; import java.security.PrivateKey; import java.security.PublicKey; import java.security.Signature; import java.security.SignatureException; import java.time.Instant; import java.util.Base64; import java.util.Set; import java.util.TreeSet; import static java.nio.charset.StandardCharsets.UTF_8; /** * Generates and validates the signature for a {@link SignedIdentityDocument} * * @author bjorncs */ public class IdentityDocumentSigner { public String generateSignature(VespaUniqueInstanceId providerUniqueId, AthenzService providerService, String configServerHostname, String instanceHostname, Instant createdAt, Set<String> ipAddresses, IdentityType identityType, PrivateKey privateKey) { try { Signature signer = SignatureUtils.createSigner(privateKey); signer.initSign(privateKey); writeToSigner(signer, providerUniqueId, providerService, configServerHostname, instanceHostname, createdAt, ipAddresses, identityType); byte[] signature = signer.sign(); return Base64.getEncoder().encodeToString(signature); } catch (GeneralSecurityException e) { throw new RuntimeException(e); } } public boolean hasValidSignature(SignedIdentityDocument doc, PublicKey publicKey) { try { Signature signer = SignatureUtils.createVerifier(publicKey); signer.initVerify(publicKey); writeToSigner(signer, doc.providerUniqueId(), doc.providerService(), doc.configServerHostname(), doc.instanceHostname(), doc.createdAt(), doc.ipAddresses(), doc.identityType()); return signer.verify(Base64.getDecoder().decode(doc.signature())); } catch (GeneralSecurityException e) { throw new RuntimeException(e); } } private static void writeToSigner(Signature signer, VespaUniqueInstanceId providerUniqueId, AthenzService providerService, String configServerHostname, String instanceHostname, Instant createdAt, Set<String> ipAddresses, IdentityType identityType) throws SignatureException { signer.update(providerUniqueId.asDottedString().getBytes(UTF_8)); signer.update(providerService.getFullName().getBytes(UTF_8)); signer.update(configServerHostname.getBytes(UTF_8)); signer.update(instanceHostname.getBytes(UTF_8)); ByteBuffer timestampAsBuffer = ByteBuffer.allocate(Long.BYTES); timestampAsBuffer.putLong(createdAt.toEpochMilli()); signer.update(timestampAsBuffer.array()); for (String ipAddress : new TreeSet<>(ipAddresses)) { signer.update(ipAddress.getBytes(UTF_8)); } signer.update(identityType.id().getBytes(UTF_8)); } }
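A hedged usage sketch (not part of the original file) of the generateSignature() method shown above. The host names, IP address, and key material are placeholders, and building a SignedIdentityDocument for hasValidSignature() is omitted because its constructor is not shown here.

import com.yahoo.vespa.athenz.api.AthenzService;
import com.yahoo.vespa.athenz.identityprovider.api.IdentityType;
import com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.time.Instant;
import java.util.Set;

public class IdentityDocumentSignerSketch {
    static String signExample(VespaUniqueInstanceId providerUniqueId,
                              AthenzService providerService,
                              IdentityType identityType) throws Exception {
        KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair(); // assumed key material
        IdentityDocumentSigner signer = new IdentityDocumentSigner();
        // Returns a Base64-encoded signature over the identity-document fields.
        return signer.generateSignature(
                providerUniqueId, providerService, "cfg1.example.com", "host1.example.com",
                Instant.now(), Set.of("10.0.0.1"), identityType, keyPair.getPrivate());
    }
}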
1,658
662
<filename>alibaba-rsocket-core/src/main/java/com/alibaba/rsocket/metadata/ServiceRegistryMetadata.java package com.alibaba.rsocket.metadata; import com.alibaba.rsocket.ServiceLocator; import com.alibaba.rsocket.encoding.JsonUtils; import com.fasterxml.jackson.annotation.JsonIgnore; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import java.util.HashSet; import java.util.Set; /** * service registry metadata: subscribed and published services from requester * * @author leijuan */ public class ServiceRegistryMetadata implements MetadataAware { /** * published services */ private Set<ServiceLocator> published = new HashSet<>(); /** * subscribed services */ private Set<ServiceLocator> subscribed = new HashSet<>(); public void addPublishedService(ServiceLocator publishedService) { this.published.add(publishedService); } public void addSubscribedService(ServiceLocator subscribedService) { this.subscribed.add(subscribedService); } public Set<ServiceLocator> getPublished() { return published; } public void setPublished(Set<ServiceLocator> published) { this.published = published; } public Set<ServiceLocator> getSubscribed() { return subscribed; } public void setSubscribed(Set<ServiceLocator> subscribed) { this.subscribed = subscribed; } @Override public RSocketMimeType rsocketMimeType() { return RSocketMimeType.ServiceRegistry; } @Override @JsonIgnore public String getMimeType() { return RSocketMimeType.ServiceRegistry.getType(); } @Override @JsonIgnore public ByteBuf getContent() { try { return JsonUtils.toJsonByteBuf(this); } catch (Exception e) { return Unpooled.EMPTY_BUFFER; } } @Override public void load(ByteBuf byteBuf) throws Exception { JsonUtils.updateJsonValue(byteBuf, this); } public boolean containPublishedServices() { return published != null && !published.isEmpty(); } public static ServiceRegistryMetadata from(ByteBuf content) { ServiceRegistryMetadata temp = new ServiceRegistryMetadata(); try { temp.load(content); } catch (Exception ignore) { } return temp; } }
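A hedged usage sketch (not in the original file): round-tripping a ServiceRegistryMetadata through its ByteBuf encoding using only the methods shown above. The ServiceLocator instances are taken as parameters because their constructor is not shown here.

import io.netty.buffer.ByteBuf;

public class ServiceRegistryMetadataSketch {
    static ServiceRegistryMetadata roundTrip(ServiceLocator publishedService, ServiceLocator subscribedService) {
        ServiceRegistryMetadata metadata = new ServiceRegistryMetadata();
        metadata.addPublishedService(publishedService);
        metadata.addSubscribedService(subscribedService);
        ByteBuf encoded = metadata.getContent();       // JSON-encoded via JsonUtils, EMPTY_BUFFER on failure
        return ServiceRegistryMetadata.from(encoded);  // parses back; returns an empty instance on failure
    }
}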
901
713
<reponame>alionkun/LightCTR_commented<filename>LightCTR/distribut/ring_collect.h // // ring_collect.h // LightCTR // // Created by SongKuangshi on 2017/12/7. // Copyright © 2017 SongKuangshi. All rights reserved. // #ifndef ring_collect_h #define ring_collect_h #include "dist_machine_abst.h" #include <vector> #include <type_traits> #include "../common/buffer_fusion.h" #include "../common/avx.h" #include "../common/barrier.h" #include "../common/lock.h" const time_t kTimeoutRetryMSInterval = 10000; // especially design for GPUs' collective ring-reduce template<typename T> class Worker_RingReduce : public Dist_Machine_Abst { public: explicit Worker_RingReduce(size_t ring_size) : _ring_size(ring_size) { cur_node_id = Worker_RingReduce::Rank(); // begin from 0 assert(cur_node_id >= 0); recv_from_id = BEGIN_ID_OF_WORKER + 1 + (cur_node_id + _ring_size - 1) % _ring_size; send_to_id = BEGIN_ID_OF_WORKER + 1 + (cur_node_id + 1 + _ring_size) % _ring_size; printf("[RING] Ring %zu -> %zu -> %zu\n", recv_from_id, BEGIN_ID_OF_WORKER + 1 + cur_node_id, send_to_id); // check router gDelivery.get_router(recv_from_id); gDelivery.get_router(send_to_id); // TODO recovering boot mode, Copy parameters from the other conventional Ring worker // and Start send and receive by processing epoch_version } ~Worker_RingReduce() { assert(cache.empty()); segment_size_arr.clear(); segment_end_arr.clear(); } void syncGradient(std::shared_ptr<BufferFusion<T> > _buf_fusion, size_t epoch, bool do_Average = true) { init(_buf_fusion); step_version = epoch * (2 * _ring_size - 2); // Firstly do all-reduce reduce_step(_buf_fusion); // Secondly do all-gather gather_step(_buf_fusion); // Finally if (likely(do_Average)) { const float scalar = 1.0 / _ring_size; _buf_fusion->transform(0, _buf_fusion->size(), [scalar](T* begin, T* end) { avx_vecScale(begin, begin, end - begin, scalar); }); } #ifdef DEBUG printf("[RING] **** Epoch %zu synchronizer completed ****\n", epoch); #endif } void syncInitializer(std::shared_ptr<BufferFusion<T> > _buf_fusion) { init(_buf_fusion); step_version = _ring_size - 1; gather_step(_buf_fusion); } inline size_t Rank() const { // Rank begin from 0 return Dist_Machine_Abst::Rank() - 1; } private: void init(std::shared_ptr<BufferFusion<T> > _buf_fusion) { assert(_ring_size > 0); if (unlikely(_param_size != _buf_fusion->size())) { _param_size = _buf_fusion->size(); assert(_param_size > 0); segment_size_arr.resize(_ring_size); segment_end_arr.resize(_ring_size); const size_t seg_size = _param_size / _ring_size; const size_t seg_res = _param_size % _ring_size; for (size_t i = 0; i < _ring_size; i++) { segment_size_arr[i] = seg_size; if (i < seg_res) { segment_size_arr[i]++; } if (i == 0) { segment_end_arr[0] = segment_size_arr[0]; } else { segment_end_arr[i] = segment_end_arr[i - 1] + segment_size_arr[i]; } } } regist_reduce_gather_handler(_buf_fusion); } void reduce_step(std::shared_ptr<BufferFusion<T> > _buf_fusion) { for (size_t i = 0; i < _ring_size - 1; i++) { const size_t send_segment_id = (cur_node_id + _ring_size - i) % _ring_size; recv_segment_id = (cur_node_id + _ring_size - i - 1) % _ring_size; // send segment to next-skip on the ring topology rcv_offset = segment_end_arr[recv_segment_id] - segment_size_arr[recv_segment_id]; step_version++; wmb(); step_barrier.reset(); // receive segment from last-skip on the ring topology { unique_lock<SpinLock> glock(cache_lock); if (!cache.empty()) { #ifdef DEBUG printf("[REDUCE] step = %zu read from cache\n", step_version); #endif auto request = cache.front(); 
cache.pop(); assert(request->epoch_version == step_version); _do_reduce(_buf_fusion, request->content); step_barrier.unblock(); } } PackageDescript desc(REQUEST_PUSH, step_version); Buffer* buffer_ptr = nullptr; _buf_fusion->memcpy_out(&buffer_ptr, segment_end_arr[send_segment_id] - segment_size_arr[send_segment_id], segment_size_arr[send_segment_id]); desc.content = std::move(*buffer_ptr); #ifdef DEBUG desc.callback = [this](std::shared_ptr<PackageDescript> resp_package) { printf("[REDUCE] send step = %zu package success\n", step_version); }; #endif bool send_status = false; do { send_status = gDelivery.send_sync(desc, send_to_id, kTimeoutRetryMSInterval); #ifdef DEBUG if (unlikely(!send_status)) { // TODO dynamic waiting interval for network delay or crash of some machines printf("[ERROR][REDUCE] send step = %zu package failed, retry\n", step_version); } #endif } while(!send_status); step_barrier.block(); } } void gather_step(std::shared_ptr<BufferFusion<T> > _buf_fusion) { for (size_t i = 0; i < _ring_size - 1; i++) { const size_t send_segment_id = (cur_node_id + 1 + _ring_size - i) % _ring_size; recv_segment_id = (cur_node_id + _ring_size - i) % _ring_size; // send segment to next-skip on the ring topology rcv_offset = segment_end_arr[recv_segment_id] - segment_size_arr[recv_segment_id]; step_version++; wmb(); step_barrier.reset(); // receive segment from last-skip on the ring topology { unique_lock<SpinLock> glock(cache_lock); if (!cache.empty()) { #ifdef DEBUG printf("[GATHER] step = %zu read from cache\n", step_version); #endif auto request = cache.front(); cache.pop(); assert(request->epoch_version == step_version); _do_gather(_buf_fusion, request->content); step_barrier.unblock(); } } PackageDescript desc(REQUEST_PUSH, step_version); Buffer* buffer_ptr = nullptr; _buf_fusion->memcpy_out(&buffer_ptr, segment_end_arr[send_segment_id] - segment_size_arr[send_segment_id], segment_size_arr[send_segment_id]); desc.content = std::move(*buffer_ptr); #ifdef DEBUG desc.callback = [this](std::shared_ptr<PackageDescript> resp_package) { printf("[GATHER] send step = %zu package success\n", step_version); }; #endif bool send_status = false; do { send_status = gDelivery.send_sync(desc, send_to_id, kTimeoutRetryMSInterval); #ifdef DEBUG if (unlikely(!send_status)) { printf("[ERROR][GATHER] send step %zu package failed, retry\n", step_version); } #endif } while(!send_status); step_barrier.block(); } } void _do_reduce(std::shared_ptr<BufferFusion<T> > _buf_fusion, Buffer& data) { assert(segment_size_arr[recv_segment_id] * sizeof(T) == data.size()); // accumulate gradients if (is_same<T, float>::value) { // try to use AVX const float* buffer = reinterpret_cast<const float*>(data.buffer()); _buf_fusion->transform(rcv_offset, segment_size_arr[recv_segment_id], [&buffer](T* begin, T* end) { avx_vecAdd(buffer, begin, begin, end - begin); buffer += end - begin; }); } else { _buf_fusion->transform(rcv_offset, segment_size_arr[recv_segment_id], [&data](T* begin, T* end) { T grad_value; for (size_t i = 0; i < end - begin; i++) { data >> grad_value; *(begin + i) += grad_value; } }); assert(data.readEOF()); } } void _do_gather(std::shared_ptr<BufferFusion<T> > _buf_fusion, const Buffer& data) { assert(segment_size_arr[recv_segment_id] * sizeof(T) == data.size()); const float* buffer = reinterpret_cast<const float*>(data.buffer()); _buf_fusion->memcpy_in(rcv_offset, buffer, segment_size_arr[recv_segment_id]); } void regist_reduce_gather_handler(std::shared_ptr<BufferFusion<T> > _buf_fusion) { request_handler_t 
handler = [this, _buf_fusion](std::shared_ptr<PackageDescript> request, PackageDescript& response) { rmb(); assert(request->node_id > BEGIN_ID_OF_WORKER); const size_t worker_id = request->node_id; assert(worker_id == recv_from_id); { unique_lock<SpinLock> glock(cache_lock); if (step_version != request->epoch_version) { // cache the request into deque and response the situation cache.push(request); #ifdef DEBUG printf("[RING] receive not match %zu expected %zu, cache it\n", request->epoch_version, step_version); #endif response.epoch_version = step_version; return; } } assert(request->content.size() % sizeof(T) == 0); #ifdef DEBUG printf("[RING] step %zu: recv %zu gradients\n", step_version, request->content.size() / sizeof(T)); #endif const size_t type = step_version % (2 * _ring_size - 2); if (type > 0 && type < _ring_size) { _do_reduce(_buf_fusion, request->content); } else { _do_gather(_buf_fusion, request->content); } response.epoch_version = step_version; step_barrier.unblock(); }; gDelivery.regist_handler(REQUEST_PUSH, std::move(handler)); } std::vector<size_t> segment_size_arr; std::vector<size_t> segment_end_arr; volatile size_t step_version; volatile size_t rcv_offset; volatile size_t recv_segment_id; queue<std::shared_ptr<PackageDescript> > cache; SpinLock cache_lock; size_t _param_size; const size_t _ring_size = 0; size_t cur_node_id; size_t recv_from_id; size_t send_to_id; Barrier step_barrier; }; #endif /* ring_collect_h */
6,495
337
from common import *
import pytest


def test_preserve_int64(ds_local):
    ds = ds_local
    assert np.array(ds[['ints']], dtype=np.int64).dtype.kind == 'i', "expected int type precision"
    assert np.array(ds[['ints']], dtype=np.int64)[0][0] == -2**62-1, "lost precision"


def test_safe_casting(ds_local):
    ds = ds_local
    # with pytest.raises(ValueError, match='.*Cannot cast.*', message='Should use safe casting rules (no precision loss)'):
    np.array(ds[['ints']])
    with pytest.raises(ValueError, match='.*Cannot cast.*'):
        np.array(ds[['ints', 'x']], dtype=np.int64)
        pytest.fail('Should use safe casting rules (no precision loss)')


def test_default_float64(ds_local):
    ds = ds_local
    assert np.array(ds[['x']]).dtype == np.dtype('f8'), "expected float precision"
322
810
<reponame>MuhammadZeeshan34/feast
# For some reason patch releases with Semantic Release are tagged as "pre-release" on GitHub. This script
# removes the "pre-release" tag from the release.
import os
import sys

import requests

USAGE = f"Usage: python {sys.argv[0]} [--help] | version_being_released (e.g., v0.19.1)]"


def get_prerelease_status(version_being_released, token):
    url = f"https://api.github.com/repos/feast-dev/feast/releases/tags/v{version_being_released}"
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"Bearer {token}"
    }
    response = requests.request("GET", url, headers=headers)
    response_json = response.json()
    return bool(response_json['prerelease']), response_json['id']


def set_prerelease_status(release_id, status, token):
    url = f"https://api.github.com/repos/feast-dev/feast/releases/{release_id}"
    payload = {"prerelease": status}
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"Bearer {token}"
    }
    requests.request("PATCH", url, json=payload, headers=headers)


def main() -> None:
    args = sys.argv[1:]
    if not args or len(args) != 1:
        raise SystemExit(USAGE)

    version_being_released = args[0].strip()  # should look like 0.19.1 (without the v)
    print(f"Disabling prerelease status for {version_being_released}")

    token = os.getenv('GITHUB_TOKEN', default=None)
    if token is None:
        raise OSError("GITHUB_TOKEN environmental variable is not set")

    is_prerelease, release_id = get_prerelease_status(version_being_released, token)
    if is_prerelease:
        set_prerelease_status(release_id, False, token)
    else:
        print(f"{version_being_released} is not a pre-release, exiting.")
        exit(0)

    is_prerelease, release_id = get_prerelease_status(version_being_released, token)
    if is_prerelease:
        import warnings
        warnings.warn(f"Failed to unset prerelease status for {version_being_released} release id {release_id}")
    else:
        print(f"Successfully unset prerelease status for {version_being_released} release id {release_id}")


if __name__ == "__main__":
    main()
858
1,338
/*
 * Copyright 2014-2020 Haiku, Inc.
 * Distributed under the terms of the MIT License.
 */
#ifndef FILE_TEST_H
#define FILE_TEST_H

#include <Url.h>

#include <TestCase.h>
#include <TestSuite.h>

#include <cppunit/TestSuite.h>
#include <tools/cppunit/ThreadedTestCase.h>

#include "TestServer.h"


class FileTest : public BThreadedTestCase {
public:
	FileTest() {};

	void StopTest();

	static void AddTests(BTestSuite& suite);
};


#endif
179
1,041
package io.ebeaninternal.server.deploy; import io.ebean.bean.EntityBean; import io.ebean.bean.PersistenceContext; import io.ebean.core.type.DataReader; import io.ebeaninternal.server.query.SqlJoinType; import java.sql.SQLException; /** * Local interface to handle Embedded, Reference and Reference Exported * cases. */ abstract class AssocOneHelp { final BeanPropertyAssocOne<?> property; private final BeanDescriptor<?> target; private final String path; AssocOneHelp(BeanPropertyAssocOne<?> property) { this(property, null); } AssocOneHelp(BeanPropertyAssocOne<?> property, String embeddedPrefix) { this.property = property; this.target = property.targetDescriptor; this.path = (embeddedPrefix == null) ? property.name : embeddedPrefix + "." + property.name; } /** * Effectively skip reading (the jdbc resultSet as already in the persistence context etc). */ void loadIgnore(DbReadContext ctx) { property.targetIdBinder.loadIgnore(ctx); } /** * Read and return the property. */ Object read(DataReader reader) throws SQLException { return property.read(reader); } /** * Read and return the property setting value into the bean. */ Object readSet(DataReader reader, EntityBean bean) throws SQLException { Object val = read(reader); if (bean != null) { property.setValue(bean, val); } return val; } /** * Read and return the bean. */ Object read(DbReadContext ctx) throws SQLException { // Support for Inheritance hierarchy on exported OneToOne ? Object id = property.targetIdBinder.read(ctx); if (id == null) { return null; } PersistenceContext pc = ctx.getPersistenceContext(); Object existing = target.contextGet(pc, id); if (existing != null) { return existing; } boolean disableLazyLoading = ctx.isDisableLazyLoading(); Object ref = target.contextRef(pc, ctx.isReadOnly(), disableLazyLoading, id); if (!disableLazyLoading) { ctx.register(path, ((EntityBean) ref)._ebean_getIntercept()); } return ref; } /** * Read setting values into the bean. */ Object readSet(DbReadContext ctx, EntityBean bean) throws SQLException { Object val = read(ctx); if (bean != null) { property.setValue(bean, val); ctx.propagateState(val); } return val; } /** * Append to the select clause. */ abstract void appendSelect(DbSqlContext ctx, boolean subQuery); /** * Append to the from clause. */ void appendFrom(DbSqlContext ctx, SqlJoinType joinType) { // nothing required here } }
916
965
<reponame>bobbrow/cpp-docs<gh_stars>100-1000
CCirc myCirc;
myCirc.SubclassDlgItem(IDC_CIRCCTRL2, this);
// ... use myCirc ...
myCirc.UnsubclassWindow();
65
372
<reponame>yoshi-code-bot/google-api-java-client-services /* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.apigee.v1.model; /** * Model definition for GoogleCloudApigeeV1KeyAliasReference. * * <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is * transmitted over HTTP when working with the Apigee API. For a detailed explanation see: * <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a> * </p> * * @author Google, Inc. */ @SuppressWarnings("javadoc") public final class GoogleCloudApigeeV1KeyAliasReference extends com.google.api.client.json.GenericJson { /** * Alias ID. Must exist in the keystore referred to by the reference. * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String aliasId; /** * Reference name in the following format: * `organizations/{org}/environments/{env}/references/{reference}` * The value may be {@code null}. */ @com.google.api.client.util.Key private java.lang.String reference; /** * Alias ID. Must exist in the keystore referred to by the reference. * @return value or {@code null} for none */ public java.lang.String getAliasId() { return aliasId; } /** * Alias ID. Must exist in the keystore referred to by the reference. * @param aliasId aliasId or {@code null} for none */ public GoogleCloudApigeeV1KeyAliasReference setAliasId(java.lang.String aliasId) { this.aliasId = aliasId; return this; } /** * Reference name in the following format: * `organizations/{org}/environments/{env}/references/{reference}` * @return value or {@code null} for none */ public java.lang.String getReference() { return reference; } /** * Reference name in the following format: * `organizations/{org}/environments/{env}/references/{reference}` * @param reference reference or {@code null} for none */ public GoogleCloudApigeeV1KeyAliasReference setReference(java.lang.String reference) { this.reference = reference; return this; } @Override public GoogleCloudApigeeV1KeyAliasReference set(String fieldName, Object value) { return (GoogleCloudApigeeV1KeyAliasReference) super.set(fieldName, value); } @Override public GoogleCloudApigeeV1KeyAliasReference clone() { return (GoogleCloudApigeeV1KeyAliasReference) super.clone(); } }
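A hedged usage sketch (not in the generated file): because the model's setters return the instance itself, a key alias reference can be populated fluently. The alias and reference values below are placeholders.

public class KeyAliasReferenceSketch {
    static GoogleCloudApigeeV1KeyAliasReference example() {
        return new GoogleCloudApigeeV1KeyAliasReference()
                .setAliasId("my-alias")
                .setReference("organizations/my-org/environments/test/references/my-keystore-ref");
    }
}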
998
409
<reponame>JensUweUlrich/seqan // ========================================================================== // NGS: Regions of Interest Analysis // ========================================================================== // Copyright (c) 2012-2018, <NAME>, Institut Pasteur // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // // ========================================================================== // Author: <NAME> <<EMAIL>> // Author: <NAME> <<EMAIL>> // ========================================================================== #include "project_spliced.h" // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::~ProjectSplicedRoi() // --------------------------------------------------------------------------- ProjectSplicedRoi::~ProjectSplicedRoi() { while (currentGroup < length(ranges)) { writeCurrentGroup(); purgeGffAndRoiRecords(); currentGroup += 1; } } // --------------------------------------------------------------------------- // Member Function ProjectedSplicedRoi::beginContig() // --------------------------------------------------------------------------- void ProjectSplicedRoi::beginContig() { // Write out all remaining records for this contig. while (currentGroup < length(ranges)) { writeCurrentGroup(); purgeGffAndRoiRecords(); currentGroup += 1; } // Remove pending ROI records. roiRecords.clear(); // Clear open GFF records and names for the records. // TODO(holtgrew): Can we add some checks here that confirm that all has be handled? clear(gffRecords); gffGroups.clear(); // Clear group name store and cache and ranges. clear(groupNames); refresh(groupNamesCache); clear(ranges); // Set current group to invalid. 
currentGroup = std::numeric_limits<unsigned>::max(); } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::beginSecondPass() // --------------------------------------------------------------------------- void ProjectSplicedRoi::beginSecondPass() { if (verbosity >= 3) for (unsigned i = 0; i < length(groupNames); ++i) std::cerr << groupNames[i] << "\t" << ranges[i].i1 << "\t" << ranges[i].i2 << "\n"; // Sort the group names by begin position of the group (ties are broken by endPosition). seqan::String<seqan::Pair<TIntPair, seqan::CharString> > buffer; resize(buffer, length(groupNames), seqan::Exact()); for (unsigned i = 0; i < length(groupNames); ++i) { buffer[i].i1 = ranges[i]; buffer[i].i2 = groupNames[i]; } std::sort(begin(buffer, seqan::Standard()), end(buffer, seqan::Standard())); // Write back in correct order and refresh group names cache. for (unsigned i = 0; i < length(buffer); ++i) { ranges[i] = buffer[i].i1; groupNames[i] = buffer[i].i2; } refresh(groupNamesCache); // Set first group as active. currentGroup = 0; } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::_updateRanges() // --------------------------------------------------------------------------- void ProjectSplicedRoi::_updateRanges(seqan::GffRecord const & record, seqan::Segment<seqan::CharString, seqan::InfixSegment> const & name) { if (verbosity >= 3) std::cerr << "Updating " << name << "\t" << record.beginPos << "\t" << record.endPos << "\n"; unsigned idx = 0; if (getIdByName(idx, groupNamesCache, name)) { ranges[idx].i1 = std::min(ranges[idx].i1, (int)record.beginPos); ranges[idx].i2 = std::max(ranges[idx].i2, (int)record.endPos); } else { idx = length(groupNames); appendName(groupNamesCache, name); appendValue(ranges, TIntPair(record.beginPos, record.endPos)); } } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::updateRanges() // --------------------------------------------------------------------------- void ProjectSplicedRoi::updateRanges(seqan::GffRecord const & record) { // Get group name (possibly comma separated list). seqan::CharString groupNames; for (unsigned i = 0; i < length(record.tagNames); ++i) if (record.tagNames[i] == groupBy) groupNames = record.tagValues[i]; if (empty(groupNames)) return; // Record has no group names. // Parse out the group names. unsigned beginPos = 0; unsigned endPos = 0; for (; endPos <= length(groupNames); ++endPos) { if (endPos == length(groupNames) && beginPos < endPos) // Ignore empty keys. { _updateRanges(record, infix(groupNames, beginPos, endPos)); } else if (groupNames[endPos] == ',') { if (beginPos < endPos) // Ignore empty keys. _updateRanges(record, infix(groupNames, beginPos, endPos)); beginPos = endPos + 1; } } } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::pushGff() // --------------------------------------------------------------------------- void ProjectSplicedRoi::pushGff(seqan::GffRecord const & record) { if (verbosity >= 3) { std::cerr << "Pushing GFF record.\n "; writeRecord(std::cerr, const_cast<seqan::GffRecord &>(record), seqan::Gff()); } gffRecords.push_back(record); // Get string set of group names for the record. 
seqan::StringSet<seqan::CharString> groups; for (unsigned i = 0; i < length(record.tagNames); ++i) if (record.tagNames[i] == groupBy) { strSplit(groups, record.tagValues[i], seqan::EqualsChar<','>()); break; } gffGroups.push_back(groups); if (verbosity >= 3) { std::cerr << "Groups:"; for (unsigned i = 0; i < length(groups); ++i) std::cerr << " " << groups[i]; std::cerr << "\n"; } } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::pushRoi() // --------------------------------------------------------------------------- void ProjectSplicedRoi::pushRoi(seqan::RoiRecord const & record) { if (verbosity >= 3) { std::cerr << "Pushing ROI record.\n"; std::cerr << " "; writeRecord(std::cerr, record, seqan::Roi()); } roiRecords.push_back(record); // If the current group cannot overlap with any ROIs on from record then we can write it out. This is followed by // removing any GFF and ROI records that are used up. if (currentGroup < length(ranges) && record.beginPos >= ranges[currentGroup].i2) { writeCurrentGroup(); purgeGffAndRoiRecords(); currentGroup += 1; } } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::writeCurrentGroup() // --------------------------------------------------------------------------- // Return true iff lhs and rhs overlap (both are BED or ROI records). template <typename TLeft, typename TRight> bool overlap(TLeft const & lhs, TRight const & rhs) { return (rhs.beginPos < lhs.endPos) && (lhs.beginPos < rhs.endPos); } void ProjectSplicedRoi::writeCurrentGroup() { if (verbosity >= 3) std::cerr << "Writing current group (" << currentGroup << ": " << groupNames[currentGroup] << ")\n"; // Collect all begin/end position pairs for GFF records with the grouping key set to currentName. seqan::Reference<TNameStore>::Type currentName = groupNames[currentGroup]; seqan::String<seqan::Pair<int, int> > pairs; typedef std::list<seqan::StringSet<seqan::CharString> >::iterator TGroupsIter; TGroupsIter itG = gffGroups.begin(); typedef std::list<seqan::GffRecord>::iterator TGffIter; for (TGffIter it = gffRecords.begin(); it != gffRecords.end(); ++it, ++itG) { if (std::find(begin(*itG, seqan::Standard()), end(*itG, seqan::Standard()), currentName) == end(*itG)) continue; // No match. appendValue(pairs, seqan::Pair<int, int>(it->beginPos, it->endPos)); } if (verbosity >= 3) { std::cerr << "Has the following " << length(pairs) << " GFF intervals\n"; for (unsigned i = 0; i < length(pairs); ++i) std::cerr << " " << pairs[i].i1 << "\t" << pairs[i].i2 << "\n"; } if (empty(pairs)) return; // Nothing to do. // Sort the begin/end positions by begin position. std::sort(begin(pairs, seqan::Standard()), end(pairs, seqan::Standard())); // Compute prefix sums of positions in result. seqan::String<int> beginPositions; appendValue(beginPositions, 0); for (unsigned i = 0; i < length(pairs); ++i) appendValue(beginPositions, back(beginPositions) + (pairs[i].i2 - pairs[i].i1)); if (verbosity >= 3) { std::cerr << "Begin positions:"; for (unsigned i = 0; i < length(beginPositions); ++i) std::cerr << " " << beginPositions[i]; std::cerr << "\n"; } // TODO(holtgrew): Check that the intervals in pairs don't overlap? // Create resulting ROI. seqan::RoiRecord record; record.ref = gffRecords.front().ref; record.beginPos = front(pairs).i1; record.endPos = back(pairs).i2; record.strand = gffRecords.front().strand; record.name = currentName; // TODO(holtgrew): Make unique. 
record.len = back(beginPositions); record.countMax = 0; resize(record.count, record.len, 0); // Project the ROI counts on the GFF intervals. typedef std::list<seqan::RoiRecord>::iterator TRoiIter; for (TRoiIter it = roiRecords.begin(); it != roiRecords.end(); ++it) { for (unsigned i = 0; i < length(pairs); ++i) { if (verbosity >= 3) { std::cerr << "ROI record\n "; writeRecord(std::cerr, *it, seqan::Roi()); } if (!(pairs[i].i1 < it->endPos && it->beginPos < pairs[i].i2)) continue; if (verbosity >= 3) std::cerr << "=> overlapping\n"; // Begin and end position of projected interval on contig. int beginPosI = std::max(it->beginPos, pairs[i].i1); int endPosI = std::min(it->endPos, pairs[i].i2); if (beginPosI >= endPosI) continue; // Skip // Begin position in record.count. int offsetR = beginPositions[i] + beginPosI - pairs[i].i1; SEQAN_ASSERT_GEQ(offsetR, 0); // Begin position in it->count. int offsetC = beginPosI - it->beginPos; SEQAN_ASSERT_GEQ(offsetC, 0); if (verbosity >= 3) std::cerr << ">>> beginPosI = " << beginPosI << "\n" << ">>> endPosI = " << endPosI << "\n" << ">>> offsetR = " << offsetR << "\n" << ">>> offsetC = " << offsetC << "\n" << ">>> beginPositions[i] = " << beginPositions[i] << "\n" << ">>> it->beginPos = " << it->beginPos << "\n"; SEQAN_ASSERT_LEQ(offsetR + endPosI - beginPosI, (int)record.len); SEQAN_ASSERT_LEQ(offsetC + endPosI - beginPosI, (int)length(it->count)); if (verbosity >= 3) { std::cerr << "PROJECTING [" << offsetC << ", " << (offsetC + endPosI - beginPosI) << ") TO " << "[" << offsetR << ", " << offsetR + endPosI - beginPosI << ")\n"; } for (int i = 0, len = endPosI - beginPosI; i < len; ++i) record.count[offsetR + i] = it->count[offsetC + i]; } } for (unsigned i = 0; i < length(record.count); ++i) record.countMax = std::max(record.countMax, record.count[i]); writeRecord(roiFileOut, record); if (verbosity >= 3) std::cerr << "RESULT\t record.ref == " << record.ref << "\n"; } // --------------------------------------------------------------------------- // Member Function ProjectSplicedRoi::purgeGffAndRoiRecords() // --------------------------------------------------------------------------- void ProjectSplicedRoi::purgeGffAndRoiRecords() { if (verbosity >= 3) std::cerr << "Purging GFF and ROI records.\n" << " current group: " << ranges[currentGroup].i1 << ", " << ranges[currentGroup].i2 << "\n"; typedef std::list<seqan::GffRecord>::iterator TGffIter; typedef std::list<seqan::StringSet<seqan::CharString> >::iterator TGroupsIter; TGroupsIter itG = gffGroups.begin(); for (TGffIter it = gffRecords.begin(); it != gffRecords.end();) if ((int)it->endPos <= ranges[currentGroup].i1) { if (verbosity >= 3) { std::cerr << "Purging\t"; writeRecord(std::cerr, *it, seqan::Gff()); } TGffIter itE = it++; gffRecords.erase(itE); TGroupsIter itEG = itG++; gffGroups.erase(itEG); } else { ++it; ++itG; } for (TGffIter it = gffRecords.begin(); it != gffRecords.end(); ++it) SEQAN_ASSERT_GT((int)it->endPos, ranges[currentGroup].i1); typedef std::list<seqan::RoiRecord>::iterator TRoiIter; for (TRoiIter it = roiRecords.begin(); it != roiRecords.end();) if (it->endPos <= ranges[currentGroup].i1) { if (verbosity >= 3) { std::cerr << "Purging\t"; writeRecord(std::cerr, *it, seqan::Roi()); } TRoiIter itE = it++; roiRecords.erase(itE); } else { ++it; } }
6,060
1,333
package org.xujin.moss.client.config;

import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.context.annotation.Bean;
import org.xujin.moss.client.eureka.EurekaAutoRegistrationCustomizer;

@ConditionalOnBean(name = "org.springframework.cloud.netflix.eureka.EurekaInstanceConfigBean")
public class EurekaClientAutoConfiguartion {

    @Bean
    public EurekaAutoRegistrationCustomizer eurekaAutoRegistrationCustomizer() {
        return new EurekaAutoRegistrationCustomizer();
    }
}
175
841
package org.jboss.resteasy.test.cdi.validation.resource;

import org.jboss.logging.Logger;

import jakarta.enterprise.context.RequestScoped;
import jakarta.ws.rs.core.Response;

@RequestScoped
public class SubResourceImpl implements SubResource {

    static boolean methodEntered;
    private static final Logger LOG = Logger.getLogger(SubResourceImpl.class);

    @Override
    public Response getAll(QueryBeanParamImpl beanParam) {
        LOG.info("beanParam#getParam valid? " + beanParam.getParam());
        methodEntered = true;
        return Response.ok().build();
    }
}
193
1,918
<reponame>beliaev-maksim/pyaedt<filename>pyaedt/third_party/ironpython/plumbum/path/__init__.py<gh_stars>1000+
# -*- coding: utf-8 -*-
from plumbum.path.base import FSUser, Path, RelativePath
from plumbum.path.local import LocalPath, LocalWorkdir
from plumbum.path.remote import RemotePath, RemoteWorkdir
from plumbum.path.utils import copy, delete, move
130
852
import FWCore.ParameterSet.Config as cms process = cms.Process('RECO2') process.load("FWCore.MessageLogger.MessageLogger_cfi") process.MessageLogger.cerr.FwkReport.reportEvery = 100 # import of standard configurations process.load('Configuration/StandardSequences/Geometry_cff') process.load('Configuration/StandardSequences/MagneticField_38T_cff') process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff') process.load('Configuration/StandardSequences/Reconstruction_cff') process.load('Configuration/EventContent/EventContent_cff') #process.load('RecoVertex/AdaptiveVertexFinder/inclusiveVertexing_cff') process.options = cms.untracked.PSet( Rethrow = cms.untracked.vstring('ProductNotFound'), wantSummary = cms.untracked.bool(True) ) # Input source process.source = cms.Source("PoolSource", # skipEvents = cms.untracked.uint32(51), fileNames = cms.untracked.vstring( #'file:/data1/arizzi/CMSSW_3_5_6/src/MCQCD80120START_C07EAA09-2D2C-DF11-87E7-002618943886.root' #'/store/relval/CMSSW_7_0_0/RelValTTbar_13/GEN-SIM-RECO/PU50ns_POSTLS170_V4-v2/00000/265B9219-FF98-E311-BF4A-02163E00EA95.root' #'/store/mc/Summer12_DR53X/TTJets_MassiveBinDECAY_TuneZ2star_8TeV-madgraph-tauola/AODSIM/PU_S10_START53_V7A-v1/0000/001C868B-B2E1-E111-9BE3-003048D4DCD8.root' #'/store/mc/Fall13dr/EWKZjj_mqq120_mll50_13TeV_madgraph-pythia8/AODSIM/tsg_PU20bx25_POSTLS162_V2-v1/00000/0087CB53-3576-E311-BB3D-848F69FD5027.root' #'/store/mc/Fall13dr/EWKZjj_mqq120_mll50_13TeV_madgraph-pythia8/AODSIM/tsg_PU40bx25_POSTLS162_V2-v1/00000/00A356DA-0C76-E311-B789-7845C4FC364D.root' "file:/scratch/arizzi/CMSSW_7_2_0_pre5/src/CC1E897F-6930-E411-80D4-0025905A6134.root" #"root://xrootd-redic.pi.infn.it:1194//store/relval/CMSSW_7_2_0_pre4/RelValTTbar_13/GEN-SIM-RECO/PU25ns_POSTLS172_V3-v3/00000/145DA8E3-C227-E411-927B-0025905A60EE.root" #'/store/mc/Summer13dr53X/QCD_Pt-80to120_TuneZ2star_13TeV-pythia6/AODSIM/PU25bx25_START53_V19D-v1/20000/006F508D-3AE4-E211-B654-90E6BA0D09D4.root' ) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) ) # Output definition process.FEVT = cms.OutputModule("PoolOutputModule", fileName = cms.untracked.string('out.root') ) # Other statements process.GlobalTag.globaltag = 'POSTLS172_V4::All' process.p = cms.Path(process.inclusiveVertexing*process.inclusiveCandidateVertexing) process.out_step = cms.EndPath(process.FEVT)
1,101
343
<reponame>yang-xiansen/drp<filename>src/main/java/com/originspark/drp/models/projects/costs/AbstractCost.java
package com.originspark.drp.models.projects.costs;

import java.math.BigDecimal;
import java.text.SimpleDateFormat;
import java.util.Date;

import javax.persistence.Column;
import javax.persistence.MappedSuperclass;
import javax.persistence.PrePersist;
import javax.persistence.PreUpdate;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.originspark.drp.models.AbstractModel;

@MappedSuperclass
public abstract class AbstractCost extends AbstractModel {

    private static SimpleDateFormat forYearMonthFormatter = new SimpleDateFormat("yyyy-MM");

    /**
     * This field is kept in sync with the date field of the corresponding invoice; it is a
     * redundant field that only exists to simplify inventory calculations (forYearMonth likewise).
     * Date: year-month-day
     */
    @Temporal(TemporalType.DATE)
    private Date forDate;

    /**
     * Date: year-month
     */
    @JsonIgnore
    @Column(columnDefinition = "char(7)", nullable = false)
    private String forYearMonth;

    /**
     * Unit price
     */
    @Column(name="unitPrice", precision = 10, scale = 3, nullable = true)
    private BigDecimal unitPrice = BigDecimal.ZERO;

    /**
     * Quantity
     */
    @Column(name="quantity", precision = 10, scale = 0, nullable = false)
    private BigDecimal quantity = BigDecimal.ZERO;

    /**
     * Total price
     */
    private BigDecimal total = BigDecimal.ZERO;

    public static enum COLUMNS {
        INVOICE
    }

    public BigDecimal getUnitPrice() {
        return unitPrice;
    }

    public void setUnitPrice(BigDecimal unitPrice) {
        if(unitPrice == null){
            this.unitPrice = BigDecimal.ZERO;
        }
        this.unitPrice = unitPrice;
    }

    public BigDecimal getQuantity() {
        return quantity;
    }

    public void setQuantity(BigDecimal quantity) {
        this.quantity = quantity;
    }

    public BigDecimal getTotal() {
        return total;
    }

    public void setTotal(BigDecimal total) {
        this.total = total;
    }

    public Date getForDate() {
        return forDate;
    }

    public void setForDate(Date forDate) {
        this.forDate = forDate;
        if (forDate != null) {
            setForYearMonth(forYearMonthFormatter.format(forDate));
        }
    }

    public String getForYearMonth() {
        return forYearMonth;
    }

    public void setForYearMonth(String forYearMonth) {
        this.forYearMonth = forYearMonth;
    }

    @Override
    public String toString() {
        return super.toString()+", unitPrice="+unitPrice+", quantity="+quantity;
    }

    @PrePersist
    public void prePersist(){
        if(getUnitPrice() == null || getQuantity() == null){
            setTotal(BigDecimal.ZERO);
        }
        setTotal(getUnitPrice().multiply(getQuantity()));
    }

    @PreUpdate
    public void PreUpdate(){
        if(getUnitPrice() == null || getQuantity() == null){
            setTotal(BigDecimal.ZERO);
        }
        setTotal(getUnitPrice().multiply(getQuantity()));
    }
}
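A hedged sketch (not in the original file) of a concrete cost entity built on the mapped superclass above, assuming AbstractModel imposes no further abstract methods. The entity name is a placeholder; the point is that prePersist()/PreUpdate() derive total = unitPrice * quantity and setForDate() fills forYearMonth automatically.

import javax.persistence.Entity;
import java.math.BigDecimal;
import java.util.Date;

@Entity
public class MaterialCost extends AbstractCost {
    // No extra fields needed for this sketch; persistence callbacks come from AbstractCost.
}

// Example usage (hypothetical values):
// MaterialCost cost = new MaterialCost();
// cost.setUnitPrice(new BigDecimal("12.500"));
// cost.setQuantity(new BigDecimal("3"));
// cost.setForDate(new Date());   // also sets forYearMonth, e.g. "2014-07"
// On persist, prePersist() stores total = 37.500.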
1,340
629
#! /usr/bin/env python
"""
Learning Series: Network Programmability Basics
Module: Network Device APIs
Lesson: NX-API Part 2
Author: <NAME> <<EMAIL>>

sbx_setup.py
Deploy baseline configuration for the lesson:
    - Add 3 VLANs to the Switch Configuration
"""

__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2016 Cisco Systems, Inc."
__license__ = "MIT"

import requests
import json

requests.packages.urllib3.disable_warnings(
    requests.packages.urllib3.exceptions.InsecureRequestWarning
)

# Switch Connection Info
url = "https://sbx-nxos-mgmt.cisco.com/ins"
switchuser = "admin"
switchpassword = "<PASSWORD>!"

myheaders = {'content-type': 'application/json'}

# Configuration Payload
payload = {
    "ins_api": {
        "version": "1.0",
        "type": "cli_conf",
        "chunk": "0",
        "sid": "1",
        "input": "vlan 201 ;name Web_VLAN ;vlan 202 ;name App_VLAN ;vlan 203 ;name Data_VLAN",  # noqa
        "output_format": "json"
    }
}

# Send Configuration
response = requests.post(
    url,
    data=json.dumps(payload),
    headers=myheaders,
    auth=(switchuser, switchpassword),
    verify=False
).json()
548
676
<filename>app/src/main/java/com/alorma/github/ui/activity/WelcomeActivity.java<gh_stars>100-1000 package com.alorma.github.ui.activity; import android.accounts.AccountAuthenticatorActivity; import android.app.Activity; import android.content.Intent; import android.os.Build; import android.os.Bundle; import android.support.annotation.StringRes; import android.support.design.widget.TextInputLayout; import android.text.Editable; import android.text.TextUtils; import android.text.TextWatcher; import android.view.View; import android.widget.Toast; import butterknife.BindView; import butterknife.ButterKnife; import com.afollestad.materialdialogs.MaterialDialog; import com.alorma.github.R; import com.alorma.github.ui.activity.login.AlternateLoginActivity; import com.alorma.github.ui.activity.login.OtpCodeActivity; import com.alorma.github.utils.KeyboardUtils; import core.User; public class WelcomeActivity extends AccountAuthenticatorActivity implements WelcomePresenterViewInterface { private static final int OTP_REQUEST = 1121; @BindView(R.id.openLogin) View buttonLogin; @BindView(R.id.openAlternateLogin) View buttonLoginToken; @BindView(R.id.login_username) TextInputLayout loginUsername; @BindView(R.id.login_password) TextInputLayout loginPassword; private WelcomePresenter welcomePresenter; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_welcome); ButterKnife.bind(this); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { getWindow().addFlags(View.SYSTEM_UI_FLAG_LAYOUT_STABLE | View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION); } welcomePresenter = new WelcomePresenter(this); buttonLogin.setEnabled(false); TextWatcher buttonEnablerTextWatcher = new ButtonEnablerTextWatcher(); addTextWatcher(loginUsername, buttonEnablerTextWatcher); addTextWatcher(loginPassword, buttonEnablerTextWatcher); buttonLogin.setOnClickListener(v -> { String username = getFromTextInputLayout(loginUsername); String passwords = getFromTextInputLayout(loginPassword); if (!TextUtils.isEmpty(username) && !TextUtils.isEmpty(passwords)) { login(username, passwords); } }); } private void login(String username, String passwords) { welcomePresenter.login(username, passwords); } @Override public void willLogin() { buttonLogin.setEnabled(false); } @Override public void onErrorUnauthorized() { loginPassword.setError(getString(R.string.unauthorized_login)); showAlternateLogin(); } private void showAlternateLogin() { buttonLoginToken.setVisibility(View.VISIBLE); buttonLoginToken.setOnClickListener(view -> openAlternateLogin()); } private void openAlternateLogin() { Intent intent = new Intent(this, AlternateLoginActivity.class); startActivity(intent); finish(); } @Override public void onErrorTwoFactorException() { show2faRequest(R.string.write_otp_code_sms); } @Override public void onErrorTwoFactorAppException() { show2faRequest(R.string.write_otp_code_app); } private void show2faRequest(@StringRes int message) { new MaterialDialog.Builder(this).input("Otp code", null, true, (dialog, input) -> { }).positiveText("Ok").negativeText("Cancel").onPositive((dialog, which) -> { if (dialog.getInputEditText() != null && dialog.getInputEditText().getText() != null) { String otpCode = dialog.getInputEditText().getText().toString(); welcomePresenter.setOtpCode(otpCode); } }).title(message).show(); } @Override public void onGenericError() { Toast.makeText(this, "Error login", Toast.LENGTH_SHORT).show(); } @Override public void finishAccess(final User user) { 
setResult(Activity.RESULT_OK); MainActivity.startActivity(this); finish(); } @Override public void didLogin() { buttonLogin.setEnabled(true); if (buttonLogin != null) { KeyboardUtils.lowerKeyboard(this); } } public String getFromTextInputLayout(TextInputLayout inputLayout) { if (inputLayout != null && inputLayout.getEditText() != null && inputLayout.getEditText().getText() != null) { return inputLayout.getEditText().getText().toString(); } return null; } public void addTextWatcher(TextInputLayout inputLayout, TextWatcher textWatcher) { if (inputLayout != null && inputLayout.getEditText() != null && textWatcher != null) { inputLayout.getEditText().addTextChangedListener(textWatcher); } } @Override protected void onActivityResult(int requestCode, int resultCode, Intent data) { super.onActivityResult(requestCode, resultCode, data); if (resultCode == RESULT_OK) { if (requestCode == OTP_REQUEST) { String code = data.getStringExtra(OtpCodeActivity.EXTRA_MESSAGE); if (code != null) { welcomePresenter.setOtpCode(code); } } } } @Override protected void onStart() { super.onStart(); welcomePresenter.start(this); } @Override protected void onStop() { welcomePresenter.stop(); super.onStop(); } private class ButtonEnablerTextWatcher implements TextWatcher { @Override public void beforeTextChanged(CharSequence s, int start, int count, int after) { } @Override public void onTextChanged(CharSequence s, int start, int before, int count) { String username = getFromTextInputLayout(loginUsername); String passwords = getFromTextInputLayout(loginPassword); buttonLogin.setEnabled(!TextUtils.isEmpty(username) && !TextUtils.isEmpty(passwords)); } @Override public void afterTextChanged(Editable s) { } } }
1,955
1,652
<reponame>MrTutao/x-pipe<filename>redis/redis-meta/src/main/java/com/ctrip/xpipe/redis/meta/server/crdt/replication/PeerMasterAdjustJobFactory.java<gh_stars>1000+
package com.ctrip.xpipe.redis.meta.server.crdt.replication;

import com.ctrip.xpipe.redis.meta.server.job.PeerMasterAdjustJob;

public interface PeerMasterAdjustJobFactory {

    PeerMasterAdjustJob buildPeerMasterAdjustJob(String clusterId, String shardId);

}
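A hedged sketch (not in the original file): the interface declares a single abstract method, so a test or wiring class can satisfy it with a lambda. buildStubJob() is a hypothetical helper, not part of x-pipe.

public class PeerMasterAdjustJobFactorySketch {
    static PeerMasterAdjustJobFactory stubFactory() {
        // Lambda implements buildPeerMasterAdjustJob(String, String).
        return (clusterId, shardId) -> buildStubJob(clusterId, shardId);
    }

    private static PeerMasterAdjustJob buildStubJob(String clusterId, String shardId) {
        return null; // placeholder; a real factory would construct a configured PeerMasterAdjustJob here
    }
}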
155
4,640
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file src/runtime/contrib/arm_compute_lib/acl_utils.h * \brief Utils and common functions for the interface. */ #ifndef TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_UTILS_H_ #define TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_UTILS_H_ #include <arm_compute/core/Types.h> #include <arm_compute/runtime/MemoryManagerOnDemand.h> #include <arm_compute/runtime/Tensor.h> #include <memory> #include <string> #include <vector> #include "../json/json_node.h" namespace tvm { namespace runtime { namespace contrib { using JSONGraphNode = tvm::runtime::json::JSONGraphNode; /*! * \brief Check if there are any errors from acl and forward them to TVM. * * Status values: * - 0 => OK * - 1 => RUNTIME_ERROR * - 2 => UNSUPPORTED_EXTENSION_USE * * \param status status of called function. */ void CheckACLError(const arm_compute::Status& status); /*! * \brief Make an acl tensor from JSON tensor representation. * * \param tensor_rep A JSON tensor representation. * \param data (optional) Initialize the tensor with memory. * \param scale (optional) The quantization scale. * \param offset (optional) The quantization offset. * \return arm_compute::Tensor. */ arm_compute::Tensor MakeACLTensor(const JSONGraphNode& tensor_rep, void* data = nullptr, const DLTensor* scale = nullptr, const DLTensor* offset = nullptr, bool apply_dim_correction = true, bool increase_dim_unit = true, uint32_t entry_index = 0); /*! * \brief Make an acl tensor info object from JSON tensor * representation. * * \param shape The shape of the tensor to create. * \param dtype The data type of the tensor to create. * \param scale (optional) The quantization scale. * \param offset (optional) The quantization offset. * \return arm_compute::TensorInfo. */ arm_compute::TensorInfo MakeACLTensorInfo(const std::vector<int64_t>& shape, const DLDataType& dtype, const DLTensor* scale = nullptr, const DLTensor* offset = nullptr, bool apply_dim_correction = true, bool increase_dim_unit = true); /*! * \brief Create a memory manager for use with a layer that * requires working memory. * * \return reference counted memory manager. */ std::shared_ptr<arm_compute::MemoryManagerOnDemand> MakeACLMemoryManager(); /*! * \brief Convert TVM padding and stride format to acl PadStrideInfo. * * \param pad The pad vector. * \param stride The stride vector. * \param ceil_mode Dimensions rounding. * \return arm_compute::PadStrideInfo */ arm_compute::PadStrideInfo MakeACLPadStride(const std::vector<std::string>& pad, const std::vector<std::string>& stride, bool ceil_mode = false); /*! * \brief Convert DLDataType to arm_compute::DataType. * * \param data_type The data type to convert. * \return arm_compute::DataType. */ arm_compute::DataType MakeACLDataType(const DLDataType& data_type); /*! 
* \brief Convert string to arm_compute::ActivationLayerInfo * * \param activation_type A string representing activation function. * Currently supports the following options: "relu". * \return arm_compute::ActivationLayerInfo. */ arm_compute::ActivationLayerInfo MakeACLActivationInfo(const std::string& activation_type); /*! * \brief Get a vector from DLTensor data. * \note Performs a copy of data. * * \tparam T The type of the vector. * \param tensor The tensor to convert. * \return Vector of type T. */ template <typename T> std::vector<T> GetVectorFromDLTensor(const DLTensor* tensor); } // namespace contrib } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_UTILS_H_
1,712
574
/* * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://www.apache.org/licenses/LICENSE-2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package com.amazon.opendistroforelasticsearch.sql.legacy; import static com.amazon.opendistroforelasticsearch.sql.legacy.TestsConstants.TEST_INDEX_DEEP_NESTED; import static com.amazon.opendistroforelasticsearch.sql.util.MatcherUtils.rows; import static com.amazon.opendistroforelasticsearch.sql.util.MatcherUtils.schema; import static com.amazon.opendistroforelasticsearch.sql.util.MatcherUtils.verifyDataRows; import static com.amazon.opendistroforelasticsearch.sql.util.MatcherUtils.verifySchema; import com.amazon.opendistroforelasticsearch.sql.legacy.utils.StringUtils; import org.json.JSONArray; import org.json.JSONObject; import org.junit.Assume; import org.junit.Test; /** * Integration test for Elasticsearch object field (and nested field). * This class is focused on simple SELECT-FROM query to ensure right column * number and value is returned. */ public class ObjectFieldSelectIT extends SQLIntegTestCase { @Override protected void init() throws Exception { loadIndex(Index.DEEP_NESTED); } @Test public void testSelectObjectFieldItself() { JSONObject response = new JSONObject(query("SELECT city FROM %s")); verifySchema(response, schema("city", null, "object")); // Expect object field itself is returned in a single cell verifyDataRows(response, rows(new JSONObject( "{\n" + " \"name\": \"Seattle\",\n" + " \"location\": {\"latitude\": 10.5}\n" + "}") ) ); } @Test public void testSelectObjectInnerFields() { JSONObject response = new JSONObject(query( "SELECT city.location, city.location.latitude FROM %s")); verifySchema(response, schema("city.location", null, "object"), schema("city.location.latitude", null, "double") ); // Expect inner regular or object field returned in its single cell verifyDataRows(response, rows( new JSONObject("{\"latitude\": 10.5}"), 10.5 ) ); } @Test public void testSelectNestedFieldItself() { JSONObject response = new JSONObject(query("SELECT projects FROM %s")); // Nested field is absent in ES Get Field Mapping response either hence "object" used verifySchema(response, schema("projects", null, "object")); // Expect nested field itself is returned in a single cell verifyDataRows(response, rows(new JSONArray( "[\n" + " {\"name\": \"AWS Redshift Spectrum querying\"},\n" + " {\"name\": \"AWS Redshift security\"},\n" + " {\"name\": \"AWS Aurora security\"}\n" + "]") ) ); } @Test public void testSelectObjectFieldOfArrayValuesItself() { Assume.assumeTrue(isNewQueryEngineEabled()); JSONObject response = new JSONObject(query("SELECT accounts FROM %s")); // Only the first element of the list of is returned. verifyDataRows(response, rows(new JSONObject("{\"id\": 1}"))); } @Test public void testSelectObjectFieldOfArrayValuesInnerFields() { Assume.assumeTrue(isNewQueryEngineEabled()); JSONObject response = new JSONObject(query("SELECT accounts.id FROM %s")); // Only the first element of the list of is returned. 
verifyDataRows(response, rows(1)); } private String query(String sql) { return executeQuery( StringUtils.format(sql, TEST_INDEX_DEEP_NESTED), "jdbc" ); } }
/** * Autogenerated by Thrift for src/module.thrift * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated @nocommit */ #pragma once #include <thrift/lib/cpp2/gen/module_data_h.h> #include "thrift/compiler/test/fixtures/mcpp2-compare/gen-cpp2/module_types.h" namespace apache { namespace thrift { template <> struct TEnumDataStorage<::some::valid::ns::MyEnumA> { using type = ::some::valid::ns::MyEnumA; static constexpr const std::size_t size = 3; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TEnumDataStorage<::some::valid::ns::AnnotatedEnum> { using type = ::some::valid::ns::AnnotatedEnum; static constexpr const std::size_t size = 3; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TEnumDataStorage<::some::valid::ns::AnnotatedEnum2> { using type = ::some::valid::ns::AnnotatedEnum2; static constexpr const std::size_t size = 3; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TEnumDataStorage<::some::valid::ns::MyEnumB> { using type = ::some::valid::ns::MyEnumB; static constexpr const std::size_t size = 1; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TEnumDataStorage<::some::valid::ns::SimpleUnion::Type> { using type = ::some::valid::ns::SimpleUnion::Type; static constexpr const std::size_t size = 2; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TEnumDataStorage<::some::valid::ns::ComplexUnion::Type> { using type = ::some::valid::ns::ComplexUnion::Type; static constexpr const std::size_t size = 28; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TEnumDataStorage<::some::valid::ns::FloatUnion::Type> { using type = ::some::valid::ns::FloatUnion::Type; static constexpr const std::size_t size = 2; static const std::array<type, size> values; static const std::array<folly::StringPiece, size> names; }; template <> struct TStructDataStorage<::some::valid::ns::Empty> { static constexpr const std::size_t fields_size = 0; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::ASimpleStruct> { static constexpr const std::size_t fields_size = 1; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::ASimpleStructNoexcept> { static constexpr const std::size_t fields_size = 1; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::MyStruct> { static constexpr const std::size_t fields_size = 11; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct 
TStructDataStorage<::some::valid::ns::SimpleUnion> { static constexpr const std::size_t fields_size = 2; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::ComplexUnion> { static constexpr const std::size_t fields_size = 28; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::AnException> { static constexpr const std::size_t fields_size = 17; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::AnotherException> { static constexpr const std::size_t fields_size = 3; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::containerStruct> { static constexpr const std::size_t fields_size = 46; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::MyIncludedStruct> { static constexpr const std::size_t fields_size = 4; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::AnnotatedStruct> { static constexpr const std::size_t fields_size = 40; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::ComplexContainerStruct> { static constexpr const std::size_t fields_size = 2; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::FloatStruct> { static constexpr const std::size_t fields_size = 2; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::FloatUnion> { static constexpr const std::size_t fields_size = 2; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const std::array<protocol::TType, fields_size> fields_types; }; template <> struct TStructDataStorage<::some::valid::ns::AllRequiredNoExceptMoveCtrStruct> { static constexpr const std::size_t fields_size = 1; static const std::array<folly::StringPiece, fields_size> fields_names; static const std::array<int16_t, fields_size> fields_ids; static const 
std::array<protocol::TType, fields_size> fields_types; }; }} // apache::thrift
{"nom":"Montsoreau","circ":"4ème circonscription","dpt":"Maine-et-Loire","inscrits":346,"abs":144,"votants":202,"blancs":0,"nuls":4,"exp":198,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":81},{"nuance":"UDI","nom":"<NAME>","voix":31},{"nuance":"FN","nom":"M. <NAME>","voix":23},{"nuance":"FI","nom":"Mme <NAME>","voix":19},{"nuance":"SOC","nom":"Mme <NAME>","voix":12},{"nuance":"ECO","nom":"Mme <NAME>","voix":10},{"nuance":"DVD","nom":"M. <NAME>","voix":9},{"nuance":"DLF","nom":"M. <NAME>","voix":5},{"nuance":"DIV","nom":"Mme <NAME>","voix":3},{"nuance":"COM","nom":"Mme <NAME>","voix":2},{"nuance":"DIV","nom":"M. <NAME>","voix":2},{"nuance":"DVD","nom":"M. <NAME>","voix":1},{"nuance":"EXG","nom":"M. <NAME>","voix":0}]}
// Copyright (C) 2000 - 2012 <NAME> // All rights reserved. // // This file is part of The Grinder software distribution. Refer to // the file LICENSE which is part of The Grinder distribution for // licensing details. The Grinder distribution is available on the // Internet at http://grinder.sourceforge.net/ // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. package net.grinder.console.model; import net.grinder.common.GrinderException; import net.grinder.common.Test; import net.grinder.console.common.ErrorHandler; import net.grinder.console.common.Resources; import net.grinder.statistics.*; import net.grinder.util.ListenerSupport; import java.util.*; /** * Collate test reports into samples and distribute to listeners. * <p> * NHN Customized version * * When notifying listeners of changes to the number of tests we send copies of the new index * arrays. This helps because most listeners are Swing dispatched and so can't guarantee the model * is in a reasonable state when they call back. * </p> * * @author Grinder Developers. * @author <NAME> (modified for nGrinder) * @since 3.0 */ public class SampleModelImplementationEx implements SampleModel { private final ConsoleProperties m_properties; private final StatisticsServices m_statisticsServices; private final Timer m_timer; private final ErrorHandler m_errorHandler; private final String m_stateIgnoringString; private final String m_stateWaitingString; private final String m_stateStoppedString; private final String m_stateCapturingString; private final String m_unknownTestString; /** * The current test set. A TreeSet is used to maintain the test order. Guarded by itself. */ private final Set<Test> m_tests = new TreeSet<Test>(); private final ListenerSupport<Listener> m_listeners = new ListenerSupport<Listener>(); private final StatisticsIndexMap.LongIndex m_periodIndex; private final StatisticExpression m_tpsExpression; private final PeakStatisticExpression m_peakTPSExpression; private final SampleAccumulatorEx m_totalSampleAccumulator; private ModelTestIndex modelTestIndex; /** * A {@link SampleAccumulator} for each test. Guarded by itself. */ private final Map<Test, SampleAccumulator> m_accumulators = Collections .synchronizedMap(new HashMap<Test, SampleAccumulator>()); // Guarded by this. private InternalState m_state; /** * Creates a new <code>SampleModelImplementation</code> instance. * * @param properties The console properties. * @param statisticsServices Statistics services. * @param timer A timer. * @param resources Console resources. * @param errorHandler Error handler. 
* @exception GrinderException if an error occurs */ public SampleModelImplementationEx(ConsoleProperties properties, StatisticsServices statisticsServices, Timer timer, Resources resources, ErrorHandler errorHandler) throws GrinderException { m_properties = properties; m_statisticsServices = statisticsServices; m_timer = timer; m_errorHandler = errorHandler; m_stateIgnoringString = resources.getString("state.ignoring.label") + ' '; m_stateWaitingString = resources.getString("state.waiting.label"); m_stateStoppedString = resources.getString("state.stopped.label"); m_stateCapturingString = resources.getString("state.capturing.label") + ' '; m_unknownTestString = resources.getString("ignoringUnknownTest.text"); final StatisticsIndexMap indexMap = statisticsServices.getStatisticsIndexMap(); m_periodIndex = indexMap.getLongIndex("period"); final StatisticExpressionFactory statisticExpressionFactory = m_statisticsServices .getStatisticExpressionFactory(); m_tpsExpression = statisticsServices.getTPSExpression(); m_peakTPSExpression = statisticExpressionFactory .createPeak(indexMap.getDoubleIndex("peakTPS"), m_tpsExpression); m_totalSampleAccumulator = new SampleAccumulatorEx(m_peakTPSExpression, m_periodIndex, m_statisticsServices.getStatisticsSetFactory()); setInternalState(new WaitingForTriggerState()); } /** * Get the expression for TPS. * * @return The TPS expression for this model. */ public StatisticExpression getTPSExpression() { return m_tpsExpression; } /** * Get the expression for peak TPS. * * @return The peak TPS expression for this model. */ public StatisticExpression getPeakTPSExpression() { return m_peakTPSExpression; } /** * Register new tests. * * @param tests The new tests. */ public void registerTests(Collection<Test> tests) { // Need to copy collection, might be immutable. final Set<Test> newTests = new HashSet<Test>(tests); final Test[] testArray; synchronized (m_tests) { newTests.removeAll(m_tests); if (newTests.size() == 0) { // No new tests. return; } m_tests.addAll(newTests); // Create an index of m_tests sorted by test number. testArray = m_tests.toArray(new Test[m_tests.size()]); } final SampleAccumulator[] accumulatorArray = new SampleAccumulator[testArray.length]; synchronized (m_accumulators) { for (Test test : newTests) { m_accumulators.put( test, new SampleAccumulator(m_peakTPSExpression, m_periodIndex, m_statisticsServices .getStatisticsSetFactory())); } for (int i = 0; i < accumulatorArray.length; i++) { accumulatorArray[i] = m_accumulators.get(testArray[i]); } } final ModelTestIndex modelTestIndex = new ModelTestIndex(testArray, accumulatorArray); this.modelTestIndex = modelTestIndex; m_listeners.apply(new ListenerSupport.Informer<Listener>() { public void inform(Listener l) { l.newTests(newTests, modelTestIndex); } }); } /** * Get the cumulative statistics for this model. * * @return The cumulative statistics. */ public StatisticsSet getTotalCumulativeStatistics() { return m_totalSampleAccumulator.getCumulativeStatistics(); } /** * Add a new model listener. * * @param listener The listener. */ public void addModelListener(Listener listener) { m_listeners.add(listener); } /** * Add a new sample listener for the specific test. * * @param test The test to add the sample listener for. * @param listener The sample listener. 
*/ public void addSampleListener(Test test, SampleListener listener) { final SampleAccumulator sampleAccumulator = m_accumulators.get(test); if (sampleAccumulator != null) { sampleAccumulator.addSampleListener(listener); } } /** * Add a new total sample listener. * * @param listener The sample listener. */ public void addTotalSampleListener(SampleListener listener) { m_totalSampleAccumulator.addSampleListener(listener); } /** * Reset the model. * * <p> * This doesn't affect our internal state, just the statistics and the listeners. * </p> */ public void reset() { synchronized (m_tests) { m_tests.clear(); } m_accumulators.clear(); m_totalSampleAccumulator.zero(); m_listeners.apply(new ListenerSupport.Informer<Listener>() { public void inform(Listener l) { l.resetTests(); } }); } /** * Start the model. */ public void start() { getInternalState().start(); } /** * Stop the model. */ public void stop() { getInternalState().stop(); } /** * Add a new test report. * * @param testStatisticsMap * The new test statistics. */ public void addTestReport(TestStatisticsMap testStatisticsMap) { getInternalState().newTestReport(testStatisticsMap); } /** * Get the current model state. * * @return The model state. */ public State getState() { return getInternalState().toExternalState(); } /** * Zero the accumulators. */ public void zero() { synchronized (m_accumulators) { for (SampleAccumulator sampleAccumulator : m_accumulators.values()) { sampleAccumulator.zero(); } } m_totalSampleAccumulator.zero(); } private InternalState getInternalState() { synchronized (this) { return m_state; } } private void setInternalState(InternalState newState) { synchronized (this) { m_state = newState; } m_listeners.apply(new ListenerSupport.Informer<Listener>() { public void inform(Listener l) { l.stateChanged(); } }); } private interface InternalState { State toExternalState(); void start(); void stop(); void newTestReport(TestStatisticsMap testStatisticsMap); } private abstract class AbstractInternalState implements InternalState, State { protected final boolean isActiveState() { return getInternalState() == this; } public State toExternalState() { // We don't bother cloning the state, only the description varies. return this; } public void start() { // Valid transition for all states. setInternalState(new WaitingForTriggerState()); } public void stop() { // Valid transition for all states. setInternalState(new StoppedState()); } } private final class WaitingForTriggerState extends AbstractInternalState { public WaitingForTriggerState() { zero(); } public void newTestReport(TestStatisticsMap testStatisticsMap) { if (m_properties.getIgnoreSampleCount() == 0) { setInternalState(new CapturingState()); } else { setInternalState(new TriggeredState()); } // Ensure the the first sample is recorded. getInternalState().newTestReport(testStatisticsMap); } public String getDescription() { return m_stateWaitingString; } public boolean isCapturing() { return false; } public boolean isStopped() { return false; } } private final class StoppedState extends AbstractInternalState { public void newTestReport(TestStatisticsMap testStatisticsMap) { // nothing to do } public String getDescription() { return m_stateStoppedString; } public boolean isStopped() { return true; } public boolean isCapturing() { return false; } } private abstract class AbstractSamplingState extends AbstractInternalState { // Guarded by this. 
private long mlastTime = 0; private volatile long msampleCount = 1; public void newTestReport(TestStatisticsMap testStatisticsMap) { (testStatisticsMap.new ForEach() { public void next(Test test, StatisticsSet statistics) { final SampleAccumulator sampleAccumulator = m_accumulators.get(test); synchronized (m_accumulators) { if (sampleAccumulator == null) { m_errorHandler.handleInformationMessage(m_unknownTestString + " " + test); } else { sampleAccumulator.addIntervalStatistics(statistics); if (shouldAccumulateSamples()) { sampleAccumulator.addCumulativeStaticstics(statistics); } if (!statistics.isComposite()) { m_totalSampleAccumulator.addIntervalStatistics(statistics); if (shouldAccumulateSamples()) { m_totalSampleAccumulator.addCumulativeStatistics(statistics); } } } } } // CHECKSTYLE:OFF }).iterate(); } protected void schedule() { synchronized (this) { if (mlastTime == 0) { mlastTime = System.currentTimeMillis(); } } m_timer.schedule(new TimerTask() { public void run() { sample(); } }, m_properties.getSampleInterval()); } public final void sample() { if (!isActiveState()) { return; } try { final long period; synchronized (this) { period = System.currentTimeMillis() - mlastTime; } final long sampleInterval = m_properties.getSampleInterval(); SampleAccumulatorEx totalSampleAccumulatorSnapshot; synchronized (m_accumulators) { for (SampleAccumulator sampleAccumulator : m_accumulators.values()) { sampleAccumulator.fireSample(sampleInterval, period); } totalSampleAccumulatorSnapshot = new SampleAccumulatorEx(m_totalSampleAccumulator); m_totalSampleAccumulator.refreshIntervalStatistics(sampleInterval, period); } totalSampleAccumulatorSnapshot.fireSample(sampleInterval, period); ++msampleCount; // I'm ignoring a minor race here: the model could have been // stopped // after the task was started. // We call setInternalState() even if the InternalState hasn't // changed since we've altered the sample count. 
if (getInternalState() instanceof StoppedState) { return; } setInternalState(nextState()); m_listeners.apply(new ListenerSupport.Informer<Listener>() { public void inform(Listener l) { l.newSample(); } }); } finally { synchronized (this) { if (isActiveState()) { schedule(); } } } } public final long getSampleCount() { return msampleCount; } protected abstract boolean shouldAccumulateSamples(); protected abstract InternalState nextState(); } private final class TriggeredState extends AbstractSamplingState { public TriggeredState() { schedule(); } protected boolean shouldAccumulateSamples() { return false; } protected InternalState nextState() { if (getSampleCount() > m_properties.getIgnoreSampleCount()) { return new CapturingState(); } return this; } public String getDescription() { return m_stateIgnoringString + getSampleCount(); } public boolean isCapturing() { return false; } public boolean isStopped() { return false; } } private final class CapturingState extends AbstractSamplingState { public CapturingState() { zero(); schedule(); } protected boolean shouldAccumulateSamples() { return true; } protected InternalState nextState() { final int collectSampleCount = m_properties.getCollectSampleCount(); if (collectSampleCount != 0 && getSampleCount() > collectSampleCount) { return new StoppedState(); } return this; } public String getDescription() { return m_stateCapturingString + getSampleCount(); } public boolean isCapturing() { return true; } public boolean isStopped() { return false; } } @SuppressWarnings("UnusedDeclaration") public ModelTestIndex getModelTestIndex() { return modelTestIndex; } public StatisticsIndexMap.LongIndex getPeriodIndex() { return m_periodIndex; } public int getSampleInterval() { return m_properties.getSampleInterval(); } }
package com.ucar.datalink.writer.es.client.rest.utils;

import com.alibaba.fastjson.JSONObject;

public class JsonUtil {

    public static Object getValueByPath(JSONObject obj, String path) {
        String[] keys = path.split("/");
        Object tem = obj;
        for (String key : keys) {
            if (tem instanceof JSONObject) {
                tem = ((JSONObject) tem).get(key);
            } else {
                return null;
            }
        }
        return tem;
    }
}
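A minimal usage sketch of the JsonUtil.getValueByPath helper above (an illustration, not part of the original file): the nested document and the "user/address/city" path are invented, and only the fastjson JSON/JSONObject calls already used by the class are assumed.

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ucar.datalink.writer.es.client.rest.utils.JsonUtil;

public class JsonUtilDemo {
    public static void main(String[] args) {
        // Build a nested document: {"user": {"address": {"city": "Hangzhou"}}}
        JSONObject doc = JSON.parseObject("{\"user\":{\"address\":{\"city\":\"Hangzhou\"}}}");

        // Walks the object graph one key at a time, exactly as getValueByPath does.
        System.out.println(JsonUtil.getValueByPath(doc, "user/address/city"));      // Hangzhou

        // Descending past a leaf value (a String, not a JSONObject) returns null.
        System.out.println(JsonUtil.getValueByPath(doc, "user/address/city/zip"));  // null
    }
}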
<reponame>Kirishikesan/haiku /* * Copyright 2006, Haiku Inc. All rights reserved. * Distributed under the terms of the MIT License. * * Authors: * <NAME> <<EMAIL>> */ #ifndef EHCI_HARDWARE_H #define EHCI_HARDWARE_H // Host Controller Capability Registers (EHCI Spec 2.2) #define EHCI_CAPLENGTH 0x00 // Capability Register Length #define EHCI_HCIVERSION 0x02 // Interface Version Number #define EHCI_HCSPARAMS 0x04 // Structural Parameters #define EHCI_HCCPARAMS 0x08 // Capability Parameters #define EHCI_HCSP_PORTROUTE 0x0c // Companion Port Route Description // Host Controller Operational Registers (EHCI Spec 2.3) #define EHCI_USBCMD 0x00 // USB Command #define EHCI_USBSTS 0x04 // USB Status #define EHCI_USBINTR 0x08 // USB Interrupt Enable #define EHCI_FRINDEX 0x0c // USB Frame Index #define EHCI_CTRDSSEGMENT 0x10 // 4GB Segment Selector #define EHCI_PERIODICLISTBASE 0x14 // Frame List Base Address #define EHCI_ASYNCLISTADDR 0x18 // Next Asynchronous List Address #define EHCI_CONFIGFLAG 0x40 // Configured Flag Register #define EHCI_PORTSC 0x44 // Port Status/Control // USB Command Register (EHCI Spec 2.3.1) #define EHCI_USBCMD_ITC_SHIFT 16 // Interrupt Threshold Control #define EHCI_USBCMD_ITC_MASK 0xff #define EHCI_USBCMD_PPCEE (1 << 15) // Per-Port Change Events Enable #define EHCI_USBCMD_FSP (1 << 14) // Fully Synchronized Prefetch #define EHCI_USBCMD_ASPE (1 << 13) // Async Schedule Prefetch Enable #define EHCI_USBCMD_PSPE (1 << 12) // Periodic Schedule Prefetch Enable #define EHCI_USBCMD_ASPME (1 << 11) // Async Schedule Park Mode Enable #define EHCI_USBCMD_ASPMC_SHIFT 8 // Async Schedule Park Mode Count #define EHCI_USBCMD_ASPMC_MASK 0x03 #define EHCI_USBCMD_LHCRESET (1 << 7) // Light Host Controller Reset #define EHCI_USBCMD_INTONAAD (1 << 6) // Interrupt on Async Advance Doorbell #define EHCI_USBCMD_ASENABLE (1 << 5) // Asynchronous Schedule Enable #define EHCI_USBCMD_PSENABLE (1 << 4) // Periodic Schedule Enable #define EHCI_USBCMD_FLS_SHIFT 2 // Frame List Size #define EHCI_USBCMD_FLS_MASK 0x03 #define EHCI_USBCMD_HCRESET (1 << 1) // Host Controller Reset #define EHCI_USBCMD_RUNSTOP (1 << 0) // Run/Stop // USB Status Register (EHCI Spec 2.3.2) #define EHCI_USBSTS_ASSTATUS (1 << 15) // Asynchronous Schedule Status #define EHCI_USBSTS_PSSTATUS (1 << 14) // Periodic Schedule Status #define EHCI_USBSTS_RECLAMATION (1 << 13) // Reclamation #define EHCI_USBSTS_HCHALTED (1 << 12) // Host Controller Halted #define EHCI_USBSTS_INTONAA (1 << 5) // Interrupt on Async Advance #define EHCI_USBSTS_HOSTSYSERR (1 << 4) // Host System Error #define EHCI_USBSTS_FLROLLOVER (1 << 3) // Frame List Rollover #define EHCI_USBSTS_PORTCHANGE (1 << 2) // Port Change Detected #define EHCI_USBSTS_USBERRINT (1 << 1) // USB Error Interrupt #define EHCI_USBSTS_USBINT (1 << 0) // USB Interrupt #define EHCI_USBSTS_INTMASK 0x3f // USB Interrupt Enable Register (EHCI Spec 2.3.3) #define EHCI_USBINTR_INTONAA (1 << 5) // Interrupt on Async Advance Enable #define EHCI_USBINTR_HOSTSYSERR (1 << 4) // Host System Error Enable #define EHCI_USBINTR_FLROLLOVER (1 << 3) // Frame List Rollover Enable #define EHCI_USBINTR_PORTCHANGE (1 << 2) // Port Change Interrupt Enable #define EHCI_USBINTR_USBERRINT (1 << 1) // USB Error Interrupt Enable #define EHCI_USBINTR_USBINT (1 << 0) // USB Interrupt Enable // Configure Flag Register (EHCI Spec 2.3.8) #define EHCI_CONFIGFLAG_FLAG (1 << 0) // Configure Flag // Port Status and Control (EHCI Spec 2.3.9) #define EHCI_PORTSC_WAKEOVERCUR (1 << 22) // Wake on Over-Current Enable #define 
EHCI_PORTSC_WAKEDISCON (1 << 21) // Wake on Disconnect Enable #define EHCI_PORTSC_WAKECONNECT (1 << 20) // Wake on Connect Enable #define EHCI_PORTSC_PTC_SHIFT 16 // Port Test Control #define EHCI_PORTSC_PTC_MASK 0x07 #define EHCI_PORTSC_PIC_SHIFT 14 // Port Indicator Control #define EHCI_PORTSC_PIC_MASK 0x03 #define EHCI_PORTSC_PORTOWNER (1 << 13) // Port Owner #define EHCI_PORTSC_PORTPOWER (1 << 12) // Port Power #define EHCI_PORTSC_DPLUS (1 << 11) // Logical Level of D+ #define EHCI_PORTSC_DMINUS (1 << 10) // Logical Level of D- #define EHCI_PORTSC_PORTRESET (1 << 8) // Port Reset #define EHCI_PORTSC_SUSPEND (1 << 7) // Suspend #define EHCI_PORTSC_FORCERESUME (1 << 6) // Force Port Resume #define EHCI_PORTSC_OCCHANGE (1 << 5) // Over-Current Change #define EHCI_PORTSC_OCACTIVE (1 << 4) // Over-Current Active #define EHCI_PORTSC_ENABLECHANGE (1 << 3) // Port Enable/Disable Change #define EHCI_PORTSC_ENABLE (1 << 2) // Port Enabled/Disabled #define EHCI_PORTSC_CONNCHANGE (1 << 1) // Connect Status Change #define EHCI_PORTSC_CONNSTATUS (1 << 0) // Current Connect Status #define EHCI_PORTSC_DATAMASK 0xffffffd5 // Extended Capabilities #define EHCI_ECP_SHIFT 8 // Extended Capability Pointer #define EHCI_ECP_MASK 0xff #define EHCI_LEGSUP_CAPID_MASK 0xff #define EHCI_LEGSUP_CAPID 0x01 #define EHCI_LEGSUP_OSOWNED (1 << 24) // OS Owned Semaphore #define EHCI_LEGSUP_BIOSOWNED (1 << 16) // BIOS Owned Semaphore #define EHCI_HCCPARAMS_FPLC (1 << 19) // 32 Frames Period List #define EHCI_HCCPARAMS_PPCEC (1 << 18) // Per-Port Change Event #define EHCI_HCCPARAMS_LPM (1 << 17) // Link Power Management #define EHCI_HCCPARAMS_HP (1 << 16) // Hardware Prefetch #define EHCI_HCCPARAMS_FRAME_CACHE(x) ((x >> 7) & 0x1) // Isochronous Periodic Threshold #define EHCI_HCCPARAMS_IPT(x) ((x >> 4) & 0x7) // Isochronous Periodic Threshold // Data Structures (EHCI Spec 3) // Applies to ehci_qh.next_phy, ehci_sitd.next_phy, ehci_itd.next_phy #define EHCI_ITEM_TYPE_ITD (0 << 1) // Isochronous Transfer Descriptor #define EHCI_ITEM_TYPE_QH (1 << 1) // Queue Head #define EHCI_ITEM_TYPE_SITD (2 << 1) // Split Transaction Isochronous TD #define EHCI_ITEM_TYPE_FSTN (3 << 1) // Frame Span Traversal Node #define EHCI_ITEM_TERMINATE (1 << 0) // Terminate // Isochronous (High-Speed) Transfer Descriptors (iTD, EHCI Spec 3.2) typedef struct ehci_itd { // Hardware Part uint32 next_phy; uint32 token[8]; uint32 buffer_phy[7]; uint32 ext_buffer_phy[7]; // Software Part uint32 this_phy; struct ehci_itd *next; struct ehci_itd *prev; uint32 last_token; } ehci_itd; #define EHCI_ITD_TOFFSET_SHIFT 0 #define EHCI_ITD_TOFFSET_MASK 0x0fff #define EHCI_ITD_IOC (1 << 15) #define EHCI_ITD_PG_SHIFT 12 #define EHCI_ITD_PG_MASK 0x07 #define EHCI_ITD_TLENGTH_SHIFT 16 #define EHCI_ITD_TLENGTH_MASK 0x0fff #define EHCI_ITD_STATUS_SHIFT 28 #define EHCI_ITD_STATUS_MASK 0xf #define EHCI_ITD_STATUS_ACTIVE (1 << 3) // Active #define EHCI_ITD_STATUS_BUFFER (1 << 2) // Data Buffer Error #define EHCI_ITD_STATUS_BABBLE (1 << 1) // Babble Detected #define EHCI_ITD_STATUS_TERROR (1 << 0) // Transaction Error #define EHCI_ITD_ADDRESS_SHIFT 0 #define EHCI_ITD_ADDRESS_MASK 0x7f #define EHCI_ITD_ENDPOINT_SHIFT 8 #define EHCI_ITD_ENDPOINT_MASK 0xf #define EHCI_ITD_DIR_SHIFT 11 #define EHCI_ITD_MUL_SHIFT 0 #define EHCI_ITD_MUL_MASK 0x3 #define EHCI_ITD_BUFFERPOINTER_SHIFT 12 #define EHCI_ITD_BUFFERPOINTER_MASK 0xfffff #define EHCI_ITD_MAXPACKETSIZE_SHIFT 0 #define EHCI_ITD_MAXPACKETSIZE_MASK 0x7ff #define EHCI_ITD_MAXPACKETSIZE_LENGTH 11 // Split Transaction Isochronous 
Transfer Descriptors (siTD, EHCI Spec 3.3) typedef struct ehci_sitd { // Hardware Part uint32 next_phy; uint8 port_number; uint8 hub_address; uint8 endpoint; uint8 device_address; uint16 reserved1; uint8 cmask; uint8 smask; uint16 transfer_length; uint8 cprogmask; uint8 status; uint32 buffer_phy[2]; uint32 back_phy; uint32 ext_buffer_phy[2]; // Software Part uint32 this_phy; struct ehci_sitd *next; struct ehci_sitd *prev; size_t buffer_size; void *buffer_log; } _PACKED ehci_sitd; // Queue Element Transfer Descriptors (qTD, EHCI Spec 3.5) typedef struct ehci_qtd { // Hardware Part uint32 next_phy; uint32 alt_next_phy; uint32 token; uint32 buffer_phy[5]; uint32 ext_buffer_phy[5]; // Software Part uint32 this_phy; struct ehci_qtd *next_log; struct ehci_qtd *alt_next_log; size_t buffer_size; void *buffer_log; } _PACKED ehci_qtd; #define EHCI_QTD_DATA_TOGGLE (1U << 31) #define EHCI_QTD_BYTES_SHIFT 16 #define EHCI_QTD_BYTES_MASK 0x7fff #define EHCI_QTD_IOC (1 << 15) #define EHCI_QTD_CPAGE_SHIFT 12 #define EHCI_QTD_CPAGE_MASK 0x07 #define EHCI_QTD_ERRCOUNT_SHIFT 10 #define EHCI_QTD_ERRCOUNT_MASK 0x03 #define EHCI_QTD_PID_SHIFT 8 #define EHCI_QTD_PID_MASK 0x03 #define EHCI_QTD_PID_OUT 0x00 #define EHCI_QTD_PID_IN 0x01 #define EHCI_QTD_PID_SETUP 0x02 #define EHCI_QTD_STATUS_SHIFT 0 #define EHCI_QTD_STATUS_MASK 0x7f #define EHCI_QTD_STATUS_ERRMASK 0x50 #define EHCI_QTD_STATUS_ACTIVE (1 << 7) // Active #define EHCI_QTD_STATUS_HALTED (1 << 6) // Halted #define EHCI_QTD_STATUS_BUFFER (1 << 5) // Data Buffer Error #define EHCI_QTD_STATUS_BABBLE (1 << 4) // Babble Detected #define EHCI_QTD_STATUS_TERROR (1 << 3) // Transaction Error #define EHCI_QTD_STATUS_MISSED (1 << 2) // Missed Micro-Frame #define EHCI_QTD_STATUS_SPLIT (1 << 1) // Split Transaction State #define EHCI_QTD_STATUS_PING (1 << 0) // Ping State #define EHCI_QTD_STATUS_LS_ERR (1 << 0) // Full-/Lowspeed Error #define EHCI_QTD_PAGE_MASK 0xfffff000 // Queue Head (QH, EHCI Spec 3.6) typedef struct ehci_qh { // Hardware Part uint32 next_phy; uint32 endpoint_chars; uint32 endpoint_caps; uint32 current_qtd_phy; struct { uint32 next_phy; uint32 alt_next_phy; uint32 token; uint32 buffer_phy[5]; uint32 ext_buffer_phy[5]; } overlay; // Software Part uint32 this_phy; struct ehci_qh *next_log; struct ehci_qh *prev_log; ehci_qtd *stray_log; ehci_qtd *element_log; } ehci_qh; typedef struct { ehci_qh queue_head; #ifdef B_HAIKU_64_BIT uint32 padding[6]; #else uint32 padding[2]; #endif } interrupt_entry; typedef struct { ehci_itd itd; #ifdef B_HAIKU_64_BIT uint32 padding[1]; // align on 128 #else uint32 padding[5]; // align on 128 #endif } itd_entry; typedef struct { ehci_sitd sitd; #ifdef B_HAIKU_64_BIT uint32 padding[14]; // align on 64 #else uint32 padding[2]; // align on 64 #endif } sitd_entry; #define EHCI_INTERRUPT_ENTRIES_COUNT (7 + 1) // (log 128 / log 2) + 1 #define EHCI_VFRAMELIST_ENTRIES_COUNT 128 #define EHCI_FRAMELIST_ENTRIES_COUNT 1024 #define MAX_AVAILABLE_BANDWIDTH 125 // Microseconds // Applies to ehci_qh.endpoint_chars #define EHCI_QH_CHARS_RL_SHIFT 28 // NAK Count Reload #define EHCI_QH_CHARS_RL_MASK 0x07 #define EHCI_QH_CHARS_CONTROL (1 << 27) // Control Endpoint Flag #define EHCI_QH_CHARS_MPL_SHIFT 16 // Max Packet Length #define EHCI_QH_CHARS_MPL_MASK 0x03ff #define EHCI_QH_CHARS_RECHEAD (1 << 15) // Head of Reclamation List Flag #define EHCI_QH_CHARS_TOGGLE (1 << 14) // Data Toggle Control #define EHCI_QH_CHARS_EPS_FULL (0 << 12) // Endpoint is Full-Speed #define EHCI_QH_CHARS_EPS_LOW (1 << 12) // Endpoint is Low-Speed #define 
EHCI_QH_CHARS_EPS_HIGH (2 << 12) // Endpoint is High-Speed #define EHCI_QH_CHARS_EPT_SHIFT 8 // Endpoint Number #define EHCI_QH_CHARS_EPT_MASK 0x0f #define EHCI_QH_CHARS_INACTIVE (1 << 7) // Inactive on Next Transaction #define EHCI_QH_CHARS_DEV_SHIFT 0 // Device Address #define EHCI_QH_CHARS_DEV_MASK 0x7f // Applies to ehci_qh.endpoint_caps #define EHCI_QH_CAPS_MULT_SHIFT 30 // Transactions per Micro-Frame #define EHCI_QH_CAPS_MULT_MASK 0x03 #define EHCI_QH_CAPS_PORT_SHIFT 23 // Hub Port (Split-Transaction) #define EHCI_QH_CAPS_PORT_MASK 0x7f #define EHCI_QH_CAPS_HUB_SHIFT 16 // Hub Address (Split-Transaction) #define EHCI_QH_CAPS_HUB_MASK 0x7f #define EHCI_QH_CAPS_SCM_SHIFT 8 // Split Completion Mask #define EHCI_QH_CAPS_SCM_MASK 0xff #define EHCI_QH_CAPS_ISM_SHIFT 0 // Interrupt Schedule Mask #define EHCI_QH_CAPS_ISM_MASK 0xff // Applies to ehci_qh.overlay[EHCI_QH_OL_*_INDEX] #define EHCI_QH_OL_NAK_INDEX 1 // NAK Counter #define EHCI_QH_OL_NAK_SHIFT 1 #define EHCI_QH_OL_NAK_MASK 0x0f #define EHCI_QH_OL_TOGGLE_INDEX 2 // Data Toggle #define EHCI_QH_OL_TOGGLE (1U << 31) #define EHCI_QH_OL_IOC_INDEX 2 // Interrupt on Complete #define EHCI_QH_OL_IOC (1 << 15) #define EHCI_QH_OL_ERRC_INDEX 2 // Error Counter #define EHCI_QH_OL_ERRC_SHIFT 10 #define EHCI_QH_OL_ERRC_MASK 0x03 #define EHCI_QH_OL_PING_INDEX 2 // Ping State #define EHCI_QH_OL_PING (1 << 0) #define EHCI_QH_OL_CPROG_INDEX 4 // Split-Transaction Complete-Split Progress #define EHCI_QH_OL_CPROG_SHIFT 0 #define EHCI_QH_OL_CPROG_MASK 0xff #define EHCI_QH_OL_FTAG_INDEX 5 // Split-Transaction Frame Tag #define EHCI_QH_OL_FTAG_SHIFT 0 #define EHCI_QH_OL_FTAG_MASK 0x0f #define EHCI_QH_OL_BYTES_INDEX 5 // Transfered Bytes #define EHCI_QH_OL_BYTES_SHIFT 5 #define EHCI_QH_OL_BYTES_MASK 0x7f // ToDo: Periodic Frame Span Traversal Node (FSTN, EHCI Spec 3.7) // Quirk registers and values #define AMD_SBX00_VENDOR 0x1002 #define AMD_SBX00_SMBUS_CONTROLLER 0x4385 #define AMD_SB600_EHCI_CONTROLLER 0x4386 #define AMD_SB700_SB800_EHCI_CONTROLLER 0x4396 #define AMD_SBX00_EHCI_MISC_REGISTER 0x50 // Advanced config register #define AMD_SBX00_EHCI_MISC_DISABLE_PERIODIC_LIST_CACHE (1 << 27) #endif // !EHCI_HARDWARE_H
#ifndef GYROSCOPIC_SETUP_H
#define GYROSCOPIC_SETUP_H

class CommonExampleInterface* GyroscopicCreateFunc(struct CommonExampleOptions& options);

#endif  // GYROSCOPIC_SETUP_H
<filename>dev/Tickets/1758.py import numpy as np import random import CoolProp.CoolProp as CP import time random.seed("coolprop_test") p = 101325 # 1 atmosphere T = np.random.uniform(120, 400, 10000) + 273.15 # Random points from 120 to 400 deg C, gas phase only # Make sure the objects exist and create tables if needed normal_state = CP.AbstractState("HEOS", "H2O") tabular_state = CP.AbstractState("BICUBIC&HEOS", "H2O") # Measure execution speed results = {} tmp = time.time() for Ti in T: rho = CP.PropsSI("Dmass", "P", p, "T", Ti, "H2O") results["1. PropsSI"] = time.time() - tmp tmp = time.time() for Ti in T: normal_state.update(CP.PT_INPUTS, p, Ti) rho = normal_state.keyed_output(CP.iDmass) results["2. HEOS"] = time.time() - tmp tmp = time.time() for Ti in T: tabular_state.update(CP.PT_INPUTS, p, Ti) rho = tabular_state.keyed_output(CP.iDmass) results["3. Tables"] = time.time() - tmp # for k in sorted(results): # print("{0} : {1} ms".format(k, results[k]*1e3)) #print("\nDo NOT do this!") tmp = time.time() for Ti in T: normal_state = CP.AbstractState("HEOS", "H2O") normal_state.update(CP.PT_INPUTS, p, Ti) rho = normal_state.keyed_output(CP.iDmass) results["4. HEOS (create state)"] = time.time() - tmp tmp = time.time() for Ti in T: tabular_state = CP.AbstractState("BICUBIC&HEOS", "H2O") tabular_state.update(CP.PT_INPUTS, p, Ti) rho = tabular_state.keyed_output(CP.iDmass) results["5. Tables (create state)"] = time.time() - tmp for k in sorted(results): print("{0} : {1} ms".format(k, results[k] * 1e3))
<reponame>tdiprima/code class Immutable(object): """An immutable class. """ _mutable = False def __setattr__(self, name,value): if self._mutable or name == '_mutable': super(Immutable,self).__setattr__(name,value) else: raise TypeError("Can't modify immutable instance") def __delattr__(self,name): if self._mutable: super(Immutable,self).__delattr__(name) else: raise TypeError("Can't modify immutable instance") def mutablemethod(f): def func(self,*args, **kwargs): if isinstance(self,Immutable): old_mutable = self._mutable self._mutable = True res = f(self,*args, **kwargs) self._mutable = old_mutable else: res = f(self,*args, **kwargs) return res return func if __name__ == '__main__': class A(Immutable): ''' Define __init__ can set attributes for instance ''' @mutablemethod def __init__(self,value): super(A,self).__init__(self) self.value = value def try_change(self,value): self.value = value a = A("test") a.try_change("TEST") a.value = "TEST"
<reponame>MelvinG24/dust3d<gh_stars>1000+ // Copyright (c) 2010 GeometryFactory (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org) // // $URL: https://github.com/CGAL/cgal/blob/v5.1/Intersections_3/include/CGAL/Intersections_3/Sphere_3_Triangle_3.h $ // $Id: Sphere_3_Triangle_3.h 52164b1 2019-10-19T15:34:59+02:00 Sébastien Loriot // SPDX-License-Identifier: LGPL-3.0-or-later OR LicenseRef-Commercial // // // Author(s) : <NAME> // #ifndef CGAL_INTERSECTIONS_3_SPHERE_3_TRIANGLE_3_H #define CGAL_INTERSECTIONS_3_SPHERE_3_TRIANGLE_3_H #include <CGAL/Sphere_3.h> #include <CGAL/Triangle_3.h> #include <CGAL/Intersections_3/internal/Triangle_3_Sphere_3_do_intersect.h> namespace CGAL { CGAL_DO_INTERSECT_FUNCTION(Sphere_3, Triangle_3, 3) } #endif // CGAL_INTERSECTIONS_3_SPHERE_3_TRIANGLE_3_H
public class Solution2 {

    public TreeNode bstFromPreorder(int[] preorder) {
        int len = preorder.length;
        if (len == 0) {
            return null;
        }
        return dfs(preorder, 0, len - 1);
    }

    /**
     * Build the binary search tree from the sub-range [left..right] of preorder.
     *
     * @param preorder
     * @param left
     * @param right
     * @return
     */
    private TreeNode dfs(int[] preorder, int left, int right) {
        if (left > right) {
            return null;
        }
        TreeNode root = new TreeNode(preorder[left]);
        if (left == right) {
            return root;
        }

        // In the range [left..right], find the last index whose value is smaller than preorder[left].
        // Note that the left boundary of the search range is set to left, not left + 1.
        // This covers the case where the range holds only the 2 elements [left, right]:
        // the first part is then an empty range and the second part holds only the single element right.
        int l = left;
        int r = right;
        while (l < r) {
            int mid = l + (r - l + 1) / 2;
            if (preorder[mid] < preorder[left]) {
                // The next search range is [mid, r]
                l = mid;
            } else {
                // The next search range is [l, mid - 1]
                r = mid - 1;
            }
        }

        TreeNode leftTree = dfs(preorder, left + 1, l);
        TreeNode rightTree = dfs(preorder, l + 1, right);
        root.left = leftTree;
        root.right = rightTree;
        return root;
    }
}
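A small worked example for Solution2 above, assuming the usual TreeNode shape (an int val plus left/right children) that the class refers to but does not define; the preorder array and the printed values are only for illustration.

// Minimal TreeNode matching what Solution2 expects (assumed here; not defined in the file above).
class TreeNode {
    int val;
    TreeNode left;
    TreeNode right;
    TreeNode(int val) { this.val = val; }
}

public class Solution2Demo {
    public static void main(String[] args) {
        // Preorder traversal of this BST:
        //         8
        //        / \
        //       5   10
        //      / \    \
        //     1   7    12
        int[] preorder = {8, 5, 1, 7, 10, 12};
        TreeNode root = new Solution2().bstFromPreorder(preorder);

        System.out.println(root.val);               // 8
        System.out.println(root.left.val);          // 5
        System.out.println(root.left.right.val);    // 7
        System.out.println(root.right.right.val);   // 12
    }
}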
<filename>src/main/java/org/snaker/engine/access/QueryFilter.java<gh_stars>100-1000 /* Copyright 2013-2015 www.snakerflow.com. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.snaker.engine.access; import java.io.Serializable; import org.apache.commons.lang.StringUtils; import org.snaker.engine.helper.AssertHelper; /** * 通用查询过滤器 * @author yuqs * @since 1.2.5 */ public class QueryFilter implements Serializable { /** * */ private static final long serialVersionUID = -8155136377911571881L; public static final String ASC = "asc"; public static final String DESC = "desc"; //排序字段 private String orderBy; //排序类型ASC/DESC private String order; /*********common parameters***********/ /** * 流程定义id */ private String processId; /** * 流程定义版本号 */ private Integer version; /** * 流程实例id */ private String orderId; /** * 任务id */ private String taskId; /** * 创建时间范围 */ private String createTimeStart; private String createTimeEnd; private String operateTime; /** * 操作人员id */ private String[] operators; /** * 名称 */ private String[] names; /** * 显示名称 */ private String displayName; /** * 状态 */ private Integer state; /** * 流程类型 */ private String processType; /** * exclude ids */ private String[] excludedIds; /*********order parameters***********/ /** * 父实例id */ private String parentId; /** * 实例编号 */ private String orderNo; /*********task parameters***********/ /** * 任务类型 */ private Integer taskType; /** * 任务参与类型 */ private Integer performType; public String getProcessId() { return processId; } public QueryFilter setProcessId(String processId) { AssertHelper.notEmpty(processId); this.processId = processId; return this; } public String getOrderId() { return orderId; } public QueryFilter setOrderId(String orderId) { AssertHelper.notEmpty(orderId); this.orderId = orderId; return this; } public String getTaskId() { return taskId; } public QueryFilter setTaskId(String taskId) { AssertHelper.notEmpty(taskId); this.taskId = taskId; return this; } public String getCreateTimeStart() { return createTimeStart; } public QueryFilter setCreateTimeStart(String createTimeStart) { AssertHelper.notEmpty(createTimeStart); this.createTimeStart = createTimeStart; return this; } public String getCreateTimeEnd() { return createTimeEnd; } public QueryFilter setCreateTimeEnd(String createTimeEnd) { AssertHelper.notEmpty(createTimeEnd); this.createTimeEnd = createTimeEnd; return this; } public String[] getOperators() { return operators; } public QueryFilter setOperators(String[] operators) { AssertHelper.notNull(operators); this.operators = operators; return this; } public QueryFilter setOperator(String operator) { AssertHelper.notEmpty(operator); this.operators = new String[1]; this.operators[0] = operator; return this; } public String[] getNames() { return names; } public QueryFilter setNames(String[] names) { AssertHelper.notNull(names); this.names = names; return this; } public QueryFilter setName(String name) { AssertHelper.notEmpty(name); this.names = new String[1]; this.names[0] = name; return this; } public String getDisplayName() { 
return displayName; } public QueryFilter setDisplayName(String displayName) { AssertHelper.notEmpty(displayName); this.displayName = displayName; return this; } public Integer getState() { return state; } public QueryFilter setState(Integer state) { AssertHelper.notNull(state); this.state = state; return this; } public String getParentId() { return parentId; } public QueryFilter setParentId(String parentId) { AssertHelper.notEmpty(parentId); this.parentId = parentId; return this; } public String getOrderNo() { return orderNo; } public QueryFilter setOrderNo(String orderNo) { AssertHelper.notEmpty(orderNo); this.orderNo = orderNo; return this; } public Integer getTaskType() { return taskType; } public QueryFilter setTaskType(Integer taskType) { AssertHelper.notNull(taskType); this.taskType = taskType; return this; } public Integer getPerformType() { return performType; } public QueryFilter setPerformType(Integer performType) { AssertHelper.notNull(performType); this.performType = performType; return this; } public String[] getExcludedIds() { return excludedIds; } public QueryFilter setExcludedIds(String[] excludedIds) { AssertHelper.notNull(excludedIds); this.excludedIds = excludedIds; return this; } public Integer getVersion() { return version; } public QueryFilter setVersion(Integer version) { AssertHelper.notNull(version); this.version = version; return this; } public String getOperateTime() { return operateTime; } public QueryFilter setOperateTime(String operateTime) { AssertHelper.notEmpty(operateTime); this.operateTime = operateTime; return this; } public String getProcessType() { return processType; } public QueryFilter setProcessType(String processType) { AssertHelper.notEmpty(processType); this.processType = processType; return this; } public String getOrderBy() { return orderBy; } public void setOrderBy(String orderBy) { this.orderBy = orderBy; } public QueryFilter orderBy(String theOrderBy) { setOrderBy(theOrderBy); return this; } public String getOrder() { return order; } /** * 设置排序类型. * @param order 可选值为desc或asc,多个排序字段时用','分隔. */ public void setOrder(String order) { String lowcaseOrder = StringUtils.lowerCase(order); //检查order字符串的合法值 String[] orders = StringUtils.split(lowcaseOrder, ','); for (String orderStr : orders) { if (!StringUtils.equals(DESC, orderStr) && !StringUtils.equals(ASC, orderStr)) { throw new IllegalArgumentException("排序类型[" + orderStr + "]不是合法值"); } } this.order = lowcaseOrder; } public QueryFilter order(String theOrder) { setOrder(theOrder); return this; } /** * 是否已设置排序字段,无默认值. */ public boolean isOrderBySetted() { return (StringUtils.isNotBlank(orderBy) && StringUtils.isNotBlank(order)); } }
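A brief sketch of how the fluent QueryFilter above is meant to be chained; the process id, operator and sort-column names are placeholders, and only methods declared in the class itself are called.

import org.snaker.engine.access.QueryFilter;

public class QueryFilterDemo {
    public static void main(String[] args) {
        // Each setter validates its argument and returns the filter, so calls chain.
        QueryFilter filter = new QueryFilter()
                .setProcessId("process-001")
                .setOperator("admin")
                .setState(1)
                .orderBy("create_Time")
                .order(QueryFilter.DESC);

        // Multiple sort fields take comma-separated order types; values other than
        // "asc"/"desc" make setOrder throw an IllegalArgumentException.
        filter.orderBy("create_Time,order_No").order("desc,asc");

        System.out.println(filter.isOrderBySetted()); // true
    }
}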
# Copyright 2019 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Enables module construction to be deferred.""" from sonnet.src import base class Deferred(base.Module): """Defers the construction of another module until the first call. Deferred can be used to declare modules that depend on computed properties of other modules before those modules are defined. This allows users to separate the declaration and use of modules. For example at the start of your program you can declare two modules which are coupled: >>> encoder = snt.Linear(64) >>> decoder = snt.Deferred(lambda: snt.Linear(encoder.input_size)) Later you can use these naturally (note: that using `decoder` first would cause an error since `encoder.input_size` is only defined after `encoder` has been called): >>> x = tf.ones([8, 32]) >>> y = encoder(x) >>> z = decoder(y) # Constructs the Linear encoder by calling the lambda. The result will satisfy the following conditions: >>> assert x.shape == z.shape >>> assert y.shape == [8, 64] >>> assert decoder.input_size == encoder.output_size >>> assert decoder.output_size == encoder.input_size """ def __init__(self, constructor, call_methods=("__call__",), name=None): """Initializes the `Deferred` module. Args: constructor: A no argument callable which constructs the module to defer to. The first time one of the `call_methods` are called the constructor will be run and then the constructed module will be called with the same method and arguments as the deferred module. call_methods: Methods which should trigger construction of the target module. The default value configures this module to construct the first time `__call__` is run. If you want to add methods other than call you should explicitly pass them (optionally), for example `call_methods=("__call__", "encode", "decode")`. name: Name for the deferred module. """ super().__init__(name=name) self._constructor = constructor self._target = None for call_method in call_methods: if call_method == "__call__": # Has to be handled separately because __call__ cannot be overridden at # the instance level. # See: https://docs.python.org/3/reference/datamodel.html#special-lookup continue setattr(self, call_method, _materialize_then_call(self, call_method)) @property @base.no_name_scope def target(self): """Returns the target module. If the constructor has not already run this will trigger construction. Subsequent calls to `target` will return the same instance. Returns: A `Module` instance as created by `self.constructor()` . 
""" if self._target is None: self._target = self._constructor() self._constructor = None return self._target @base.no_name_scope def __call__(self, *args, **kwargs): return self.target(*args, **kwargs) # pylint: disable=not-callable def __str__(self): return "Deferred({})".format(str(self.target)) def __repr__(self): return "Deferred({})".format(repr(self.target)) def __getattr__(self, name): if name != "_target" and hasattr(self, "_target"): if self._target is not None: return getattr(self._target, name) raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) def __setattr__(self, name, value): if name != "_target" and hasattr(self, "_target"): if self._target is not None: setattr(self._target, name, value) return super().__setattr__(name, value) def __delattr__(self, name): if name != "_target" and hasattr(self, "_target"): if self._target is not None: return delattr(self._target, name) super().__delattr__(name) def _materialize_then_call(module, method_name): def wrapped(*args, **kwargs): return getattr(module.target, method_name)(*args, **kwargs) return wrapped
<gh_stars>100-1000 /* * Copyright ©2018 vbill.cn. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * </p> */ package cn.vbill.middleware.porter.manager.service; import cn.vbill.middleware.porter.manager.core.entity.DataTable; import cn.vbill.middleware.porter.manager.web.page.Page; import java.util.List; /** * 数据表信息表 服务接口类 * * @author: FairyHood * @date: 2018-03-07 13:40:30 * @version: V1.0-auto * @review: FairyHood/2018-03-07 13:40:30 */ public interface DataTableService { /** * 新增 * * @date 2018/8/10 上午11:16 * @param: [dataTable] * @return: java.lang.Integer */ Integer insert(DataTable dataTable); /** * 删除 * * @date 2018/8/10 上午11:18 * @param: [id] * @return: java.lang.Integer */ Integer delete(Long id); /** * 根据id查询 * * @date 2018/8/10 上午11:18 * @param: [id] * @return: cn.vbill.middleware.porter.manager.core.event.DataTable */ DataTable selectById(Long id); /** * 分页 * * @date 2018/8/10 上午11:19 * @param: [page, bankName, beginTime, endTime] * @return: cn.vbill.middleware.porter.manager.web.page.Page<cn.vbill.middleware.porter.manager.core.event.DataTable> */ Page<DataTable> page(Page<DataTable> page, String bankName, String beginTime, String endTime); /** * prefixList * * @date 2018/8/10 上午11:19 * @param: [sourceId] * @return: java.util.List<java.lang.String> */ List<String> prefixList(Long sourceId); /** * tableList * * @date 2018/8/10 上午11:24 * @param: [page, sourceId, prefix, tableName] * @return: cn.vbill.middleware.porter.manager.web.page.Page<java.lang.Object> */ Page<Object> tableList(Page<Object> page, Long sourceId, String prefix, String tableName); /** * dataTableList * * @date 2018/8/10 上午11:24 * @param: [dataTablePage] * @return: cn.vbill.middleware.porter.manager.web.page.Page<cn.vbill.middleware.porter.manager.core.event.DataTable> */ Page<DataTable> dataTableList(Page<DataTable> dataTablePage); }
/*
 * Copyright 2018 The GraphicsFuzz Project Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.graphicsfuzz.glesworker;

public class StandaloneRenderJob {

  public String fragmentSource;
  public String uniformsInfo;
  public String outputFileName;

  public StandaloneRenderJob(String fragmentSource, String uniformsInfo, String outputFileName) {
    this.fragmentSource = fragmentSource;
    this.uniformsInfo = uniformsInfo;
    this.outputFileName = outputFileName;
  }
}
{ "copyright_text": null, "description": "This talk presents the concepts and design principles behind new\nlearning modules for teaching undergraduate engineering students to use\nPython to learn. Find the modules at:\nhttps://github.com/engineersCode/EngComp \u2014 I aim to start a community of\neducators sharing and remixing learning modules like these, to teach\nengineering subjects at all levels. The goal of this talk is to set in\nmotion the conversations that can create this community.Presenter(s):\nSpeaker: <NAME>, George Washington University\n", "duration": 1872, "language": "eng", "recorded": "2018-07-13", "related_urls": [ { "label": "Conference schedule", "url": "https://scipy2018.scipy.org/ehome/299527/721463/" }, { "label": "Conference slides", "url": "https://github.com/deniederhut/Slides-SciPyConf-2018" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/ynWDsMpauTw/maxresdefault.jpg", "title": "Engineers Code: Re-usable, Open Educational Modules for Engineering Undergraduates", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=ynWDsMpauTw" } ] }
package com.linkedin.dagli.assorted; import com.linkedin.dagli.dag.DAG; import com.linkedin.dagli.dag.DAG1x1; import com.linkedin.dagli.distribution.MostLikelyLabelFromDistribution; import com.linkedin.dagli.liblinear.LiblinearClassification; import com.linkedin.dagli.embedding.classification.Embedded; import com.linkedin.dagli.fasttext.FastTextClassification; import com.linkedin.dagli.text.token.Tokens; import java.util.Locale; /** * In this example we predict the character who uttered a given line of dialog from a Shakespearean play by using a * FastText model to produce embeddings which we then feed to logistic regression in a simple pipeline. */ public class FastTextEmbeddingAndLiblinearExample { private FastTextEmbeddingAndLiblinearExample() { } /** * Creates the DAG that will be prepared (trained) to predict Shakespeare character from dialog text. * * The DAG accepts a {@link CharacterDialog} with both the character name and a line of their dialog; the character * name will be null during inference. * * @return the preparable DAG */ public static DAG1x1<CharacterDialog, String> createDAG() { // Define the "placeholder" of the DAG. When the DAG is executed, we'll provide the placeholder values as a list of // inputs. If you think of the DAG as consuming a list of "rows", where each row is an example, placeholders are // the "columns" (although in this case we have just one--using the CharacterDialog @Struct simplifies things over // feeding in the dialog and character name separately). // Using CharacterDialog.Placeholder, which derives from Placeholder<CharacterDialog>, allows us to use convenience // methods to access the fields of the CharacterDialog @Struct: CharacterDialog.Placeholder example = new CharacterDialog.Placeholder(); // FastText requires text to be broken into a list of tokens (words and punctuation symbols); we can use Tokens: Tokens dialogTokens = new Tokens().withTextInput(example.asDialog()).withLocale(Locale.UK); // Now configure the FastText classifier, from which we will later pull our supervised embeddings. Note that we're // not actually going to use this classifier to classify anything! All we care about are the embeddings. When this // DAG runs, the FastText classifier will be trained, but it won't actually infer labels on anything (this is an // important point if you have a large set of labels since inference cost [but not training cost!] in FastText // scales with the size of the label set). // Since transformers are immutable, with____(...) creates copies of the transformer with the specified setting // changed. This "in-place builder" pattern is the standard for configuring transformers. FastTextClassification<String> fastTextClassification = new FastTextClassification<String>() .withLabelInput(example.asCharacter()) .withTokensInput(dialogTokens) .withEpochCount(100) .withBucketCount(200000); // fewer buckets than default to save RAM; don't use this value for real problems! // Of course, we don't really care about the label predicted by FastText--we just want to use the trained model to // create an embedding for our text, which we'll use as features. Fortunately, FastTextClassification has a // asEmbeddedTokens() method, which provides a "view" of the trained model as a transformer that takes in a sequence // of tokens and produces an embedding. // This embedding is actually a (feature) vector, and Embedded.Features<CharSequence> implements // Producer<? extends DenseVector> (i.e. 
it produces a DenseVector we can pass to our downstream model): Embedded.Features<CharSequence> fastTextEmbedding = fastTextClassification.asEmbeddedTokens(); // An aside: we're only using the trained text embeddings from our FastText model, but you can also use // asEmbeddedLabel() on your FastText classifier to get a transformer that will embed labels according to the // learned label embeddings, too! // Note that we're *not* going to cross-train in this DAG, which we should normally do when using one model as an // input to another to avoid overfitting. We won't go into too much detail here, but (1) with embeddings, // cross-training is impractical, and (2) overfitting is less of a concern because the ability of the model to // "memorize" examples is somewhat limited (this would be especially true if FastText could ignore rare ngrams, but // currently this is not supported). LiblinearClassification<String> classification = new LiblinearClassification<String>() .withLabelInput(example.asCharacter()) .withFeaturesInput(fastTextEmbedding); // Our classification is actually a distribution over possible characters; we just want the most likely: MostLikelyLabelFromDistribution<String> mostLikelyCharacter = new MostLikelyLabelFromDistribution<String>().withInput(classification); // build the DAG by specifying the placeholder used and the desired output: return DAG.withPlaceholder(example).withOutput(mostLikelyCharacter); } public static void main(String[] args) { Experiment.run(createDAG()); } }
1,490
30,023
<gh_stars>1000+ { "config": { "step": { "user": { "description": "To find your API key, open the Tautulli webpage and navigate to Settings and then to Web interface. The API key will be at the bottom of that page.\n\nExample of the URL: ```http://192.168.0.10:8181``` with 8181 being the default port.", "data": { "api_key": "[%key:common::config_flow::data::api_key%]", "url": "[%key:common::config_flow::data::url%]", "verify_ssl": "[%key:common::config_flow::data::verify_ssl%]" } }, "reauth_confirm": { "title": "Re-authenticate Tautulli", "description": "To find your API key, open the Tautulli webpage and navigate to Settings and then to Web interface. The API key will be at the bottom of that page.", "data": { "api_key": "[%key:common::config_flow::data::api_key%]" } } }, "error": { "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]", "invalid_auth": "[%key:common::config_flow::error::invalid_auth%]", "unknown": "[%key:common::config_flow::error::unknown%]" }, "abort": { "single_instance_allowed": "[%key:common::config_flow::abort::single_instance_allowed%]", "reauth_successful": "[%key:common::config_flow::abort::reauth_successful%]" } } }
563
448
<reponame>kaymes/terracotta<filename>tests/drivers/test_sqlite_remote.py """SQLite-remote driver specific tests. Tests that apply to all drivers go to test_drivers.py. """ import os import time import uuid import tempfile from pathlib import Path import pytest boto3 = pytest.importorskip('boto3') moto = pytest.importorskip('moto') @pytest.fixture(autouse=True) def mock_aws_env(monkeypatch): with monkeypatch.context() as m: m.setenv('AWS_DEFAULT_REGION', 'us-east-1') m.setenv('AWS_ACCESS_KEY_ID', 'FakeKey') m.setenv('AWS_SECRET_ACCESS_KEY', 'FakeSecretKey') m.setenv('AWS_SESSION_TOKEN', 'FakeSessionToken') yield class Timer: def __init__(self, auto=False): self.auto = auto self.time = 0 def __call__(self): if self.auto: self.time += 1 return self.time def tick(self): self.time += 1 @pytest.fixture() def s3_db_factory(tmpdir): bucketname = str(uuid.uuid4()) def _s3_db_factory(keys, datasets=None): from terracotta import get_driver with tempfile.TemporaryDirectory() as tmpdir: dbfile = Path(tmpdir) / 'tc.sqlite' driver = get_driver(dbfile) driver.create(keys) if datasets: for keys, path in datasets.items(): driver.insert(keys, path) with open(dbfile, 'rb') as f: db_bytes = f.read() conn = boto3.resource('s3') conn.create_bucket(Bucket=bucketname) s3 = boto3.client('s3') s3.put_object(Bucket=bucketname, Key='tc.sqlite', Body=db_bytes) return f's3://{bucketname}/tc.sqlite' return _s3_db_factory @moto.mock_s3 def test_remote_database(s3_db_factory): keys = ('some', 'keys') dbpath = s3_db_factory(keys) from terracotta import get_driver driver = get_driver(dbpath) assert driver.key_names == keys def test_invalid_url(): from terracotta import get_driver driver = get_driver('foo', provider='sqlite-remote') with pytest.raises(ValueError): with driver.connect(): pass @moto.mock_s3 def test_nonexisting_url(): from terracotta import get_driver, exceptions driver = get_driver('s3://foo/db.sqlite') with pytest.raises(exceptions.InvalidDatabaseError): with driver.connect(): pass @moto.mock_s3 def test_remote_database_cache(s3_db_factory, raster_file, monkeypatch): keys = ('some', 'keys') dbpath = s3_db_factory(keys) from terracotta import get_driver driver = get_driver(dbpath) driver._last_updated = -float('inf') with driver.connect(): assert driver.key_names == keys assert driver.get_datasets() == {} modification_date = os.path.getmtime(driver.path) s3_db_factory(keys, datasets={('some', 'value'): str(raster_file)}) # no change yet assert driver.get_datasets() == {} assert os.path.getmtime(driver.path) == modification_date # check if remote db is cached correctly driver._last_updated = time.time() with driver.connect(): # db connection is cached; so still no change assert driver.get_datasets() == {} assert os.path.getmtime(driver.path) == modification_date # invalidate cache driver._last_updated = -float('inf') with driver.connect(): # now db is updated on reconnect assert list(driver.get_datasets().keys()) == [('some', 'value')] assert os.path.getmtime(driver.path) != modification_date @moto.mock_s3 def test_immutability(s3_db_factory, raster_file): keys = ('some', 'keys') dbpath = s3_db_factory(keys, datasets={('some', 'value'): str(raster_file)}) from terracotta import get_driver driver = get_driver(dbpath) with pytest.raises(NotImplementedError): driver.create(keys) with pytest.raises(NotImplementedError): driver.insert(('some', 'value'), str(raster_file)) with pytest.raises(NotImplementedError): driver.delete(('some', 'value')) @moto.mock_s3 def test_destructor(s3_db_factory, raster_file, capsys): 
keys = ('some', 'keys') dbpath = s3_db_factory(keys, datasets={('some', 'value'): str(raster_file)}) from terracotta import get_driver driver = get_driver(dbpath) assert os.path.isfile(driver.path) driver.__del__() assert not os.path.isfile(driver.path) captured = capsys.readouterr() assert 'Exception ignored' not in captured.err # re-create file to prevent actual destructor from failing with open(driver.path, 'w'): pass
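# --- Illustrative usage sketch (not part of the test suite above) ---
# The bucket name and database path below are hypothetical; the calls
# (get_driver, connect, key_names, get_datasets) are the same ones the
# tests exercise against the moto-mocked S3 bucket.
def example_list_remote_datasets(dbpath='s3://my-bucket/tc.sqlite'):
    from terracotta import get_driver
    driver = get_driver(dbpath)
    with driver.connect():
        print(driver.key_names)       # e.g. ('some', 'keys')
        return driver.get_datasets()  # maps key tuples to raster paths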
1,955
1,405
package com.lenovo.lps.sus.control; import android.view.View; /* loaded from: classes.dex */ class x implements View.OnClickListener { final /* synthetic */ SUSCustdefNotificationActivity a; /* JADX INFO: Access modifiers changed from: package-private */ public x(SUSCustdefNotificationActivity sUSCustdefNotificationActivity) { this.a = sUSCustdefNotificationActivity; } @Override // android.view.View.OnClickListener public void onClick(View view) { if (view.equals(this.a.m)) { r.b(); SUSCustdefNotificationActivity.i = 3; this.a.finish(); } } }
259
1,475
<filename>geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityConfigCustomProfileTest.java<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.tools.pulse; import static org.apache.geode.test.junit.rules.HttpResponseAssert.assertResponse; import java.io.File; import java.io.IOException; import java.net.URL; import org.apache.commons.io.FileUtils; import org.apache.http.HttpResponse; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.apache.geode.test.junit.categories.PulseTest; import org.apache.geode.test.junit.rules.GeodeHttpClientRule; import org.apache.geode.test.junit.rules.LocatorStarterRule; @Category({PulseTest.class}) public class PulseSecurityConfigCustomProfileTest { @ClassRule public static LocatorStarterRule locator = new LocatorStarterRule() .withSystemProperty("spring.profiles.active", "pulse.authentication.custom") .withHttpService(); private static File xmlFile; @BeforeClass public static void setUpCustomXml() throws IOException { // copy the pulse-authentication-custom.xml to the locator's working dir and start the locator xmlFile = new File(locator.getWorkingDir(), "pulse-authentication-custom.xml"); URL xmlUrl = PulseSecurityConfigCustomProfileTest.class.getClassLoader() .getResource("pulse-authentication-custom.xml"); FileUtils.copyURLToFile(xmlUrl, xmlFile); locator.startLocator(); } @AfterClass public static void cleanUp() { xmlFile.delete(); } @Rule public GeodeHttpClientRule client = new GeodeHttpClientRule(locator::getHttpPort); @Test public void testLogin() throws Exception { HttpResponse response = client.loginToPulse("admin", "admin"); assertResponse(response).hasStatusCode(302).hasHeaderValue("Location") .contains("/pulse/login.html?error=BAD_CREDS"); client.loginToPulseAndVerify("test", "test"); } @Test public void loginPage() throws Exception { HttpResponse response = client.get("/pulse/login.html"); assertResponse(response).hasStatusCode(200).hasResponseBody().contains("<html>"); } @Test public void authenticateUser() throws Exception { HttpResponse response = client.get("/pulse/authenticateUser"); assertResponse(response).hasStatusCode(200).hasResponseBody() .isEqualTo("{\"isUserLoggedIn\":false}"); } @Test public void dataBrowserRegions() throws Exception { HttpResponse response = client.get("/pulse/dataBrowserRegions"); // get a restricted page will result in login page assertResponse(response).hasStatusCode(200).hasResponseBody() .contains( "<form method=\"POST\" action=\"login\" name=\"loginForm\" id=\"loginForm\" autocomplete=\"off\">"); } @Test public void pulseVersion() throws Exception { HttpResponse response = 
client.get("/pulse/pulseVersion"); assertResponse(response).hasStatusCode(200).hasResponseBody().contains("{\"pulseVersion"); } }
1,193
14,668
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // This variant of the keyframe effect model performs additional processing // for computed keyframes. The algorithm for constructing keyframes for a CSS // animation is covered in the following spec: // https://drafts.csswg.org/css-animations-2/#keyframes // // Most of the steps for constructing computed keyframes are handled during // the construction process; however, evaluation of computed property values // is handled as a lazy operation when fetching the keyframes. #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_ANIMATION_CSS_CSS_KEYFRAME_EFFECT_MODEL_H_ #define THIRD_PARTY_BLINK_RENDERER_CORE_ANIMATION_CSS_CSS_KEYFRAME_EFFECT_MODEL_H_ #include "third_party/blink/renderer/core/animation/keyframe_effect_model.h" namespace blink { class CssKeyframeEffectModel : public StringKeyframeEffectModel { public: explicit CssKeyframeEffectModel( const KeyframeVector& keyframes, CompositeOperation composite = kCompositeReplace, scoped_refptr<TimingFunction> default_keyframe_easing = nullptr) : StringKeyframeEffectModel(keyframes, composite, std::move(default_keyframe_easing)) {} // Overridden to fill in missing property values for generated "from" and "to" // keyframes. TODO(crbug.com/1070627): Also perform the following steps: // 1) filter variables from keyframes, 2) use computed values for properties // rather than values from the keyframes rule, and 3) switch logical to // physical properties. KeyframeEffectModelBase::KeyframeVector GetComputedKeyframes( Element* element) override; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_CORE_ANIMATION_CSS_CSS_KEYFRAME_EFFECT_MODEL_H_
620
971
#include "common/graph.h" #include "test_util/test_harness.h" namespace noisepage { using common::Graph; class GraphTest : public TerrierTest {}; // ---------------------------------------------------------------------------- // Test Utilities // ---------------------------------------------------------------------------- /** * Determine if two vertex sets are equal. * @param a Input vertex set * @param b Input vertex set * @return `true` if the vertex sets are equal, `false` otherwise */ static bool VertexSetsEqual(const std::vector<std::size_t> &a, const std::vector<std::size_t> &b) { std::vector<std::size_t> local_a{a.cbegin(), a.cend()}; std::vector<std::size_t> local_b{b.cbegin(), b.cend()}; std::sort(local_a.begin(), local_a.end()); std::sort(local_b.begin(), local_b.end()); return (local_a.size() == local_b.size()) && std::equal(local_a.cbegin(), local_a.cend(), local_a.cbegin()); } /** * Determine if two edge sets are equal. * @param a Input edge set * @param b Input edge set * @return `true` if the edge sets are equal, `false` otherwise */ static bool EdgeSetsEqual(const std::vector<std::pair<std::size_t, std::size_t>> &a, const std::vector<std::pair<std::size_t, std::size_t>> &b) { std::vector<std::pair<std::size_t, std::size_t>> local_a{a.cbegin(), a.cend()}; std::vector<std::pair<std::size_t, std::size_t>> local_b{b.cbegin(), b.cend()}; std::sort(local_a.begin(), local_a.end()); std::sort(local_b.begin(), local_b.end()); return (local_a.size() == local_b.size()) && std::equal(local_a.cbegin(), local_a.cend(), local_b.cbegin()); } // ---------------------------------------------------------------------------- // Graph Construction // ---------------------------------------------------------------------------- TEST_F(GraphTest, Construction0) { Graph g{}; std::vector<std::size_t> expected_vertex_set{}; std::vector<std::pair<std::size_t, std::size_t>> expected_edge_set{}; EXPECT_EQ(g.Order(), 0UL); EXPECT_EQ(g.Size(), 0UL); EXPECT_TRUE(VertexSetsEqual(g.VertexSet(), expected_vertex_set)); EXPECT_TRUE(EdgeSetsEqual(g.EdgeSet(), expected_edge_set)); } TEST_F(GraphTest, Construction1) { Graph g{}; g.AddEdge(0, 1); std::vector<std::size_t> expected_vertex_set{0, 1}; std::vector<std::pair<std::size_t, std::size_t>> expected_edge_set{{0, 1}}; EXPECT_EQ(g.Order(), 2UL); EXPECT_EQ(g.Size(), 1UL); EXPECT_TRUE(VertexSetsEqual(g.VertexSet(), expected_vertex_set)); EXPECT_TRUE(EdgeSetsEqual(g.EdgeSet(), expected_edge_set)); } TEST_F(GraphTest, Construction2) { Graph g{}; g.AddEdge(0, 1); g.AddEdge(1, 0); g.AddVertex(2); std::vector<std::size_t> expected_vertex_set{0, 1, 2}; std::vector<std::pair<std::size_t, std::size_t>> expected_edge_set{{0, 1}, {1, 0}}; EXPECT_EQ(g.Order(), 3UL); EXPECT_EQ(g.Size(), 2UL); EXPECT_TRUE(VertexSetsEqual(g.VertexSet(), expected_vertex_set)); EXPECT_TRUE(EdgeSetsEqual(g.EdgeSet(), expected_edge_set)); } TEST_F(GraphTest, Construction3) { const auto a = Graph::FromEdgeSet({{0, 1}, {1, 2}}); Graph b{}; b.AddEdge(0, 1); b.AddEdge(1, 2); EXPECT_EQ(a.Order(), 3UL); EXPECT_EQ(a.Size(), 2UL); EXPECT_EQ(b.Order(), 3UL); EXPECT_EQ(b.Size(), 2UL); EXPECT_EQ(a, b); } TEST_F(GraphTest, Construction4) { const auto g = Graph::FromEdgeSet({{0, 1}, {1, 2}}); std::vector<std::size_t> expected_vertex_set{0, 1, 2}; std::vector<std::pair<std::size_t, std::size_t>> expected_edge_set{{0, 1}, {1, 2}}; EXPECT_EQ(g.Order(), 3UL); EXPECT_EQ(g.Size(), 2UL); EXPECT_TRUE(VertexSetsEqual(g.VertexSet(), expected_vertex_set)); EXPECT_TRUE(EdgeSetsEqual(g.EdgeSet(), expected_edge_set)); } // 
---------------------------------------------------------------------------- // Graph Equality // ---------------------------------------------------------------------------- TEST_F(GraphTest, Equality0) { Graph a{}; Graph b{}; EXPECT_EQ(a, b); } TEST_F(GraphTest, Equality1) { Graph a{}; a.AddEdge(0, 1); a.AddEdge(1, 2); Graph b{}; b.AddEdge(0, 1); b.AddEdge(1, 2); EXPECT_EQ(a, b); } TEST_F(GraphTest, Equality2) { // Distinct edge sets Graph a{}; a.AddEdge(0, 1); a.AddEdge(1, 2); Graph b{}; b.AddEdge(0, 1); b.AddEdge(1, 2); b.AddEdge(0, 2); EXPECT_NE(a, b); } TEST_F(GraphTest, Equality3) { // Distinct vertex sets Graph a{}; a.AddEdge(0, 1); a.AddEdge(1, 2); Graph b{}; b.AddEdge(0, 1); b.AddEdge(1, 2); b.AddVertex(3); EXPECT_NE(a, b); } } // namespace noisepage
1,788
3,474
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #import "NSObject.h" @class NSObject<OS_dispatch_queue>, NSPredicate, NSString, NSTimer, XCTNSPredicateExpectation; @interface _XCTNSPredicateExpectationImplementation : NSObject { XCTNSPredicateExpectation *_expectation; id <XCTNSPredicateExpectationObject> _object; NSPredicate *_predicate; CDUnknownBlockType _handler; NSTimer *_timer; NSObject<OS_dispatch_queue> *_queue; BOOL _hasCleanedUp; } @property(readonly, copy) NSPredicate *predicate; // @synthesize predicate=_predicate; @property(readonly) id <XCTNSPredicateExpectationObject> object; // @synthesize object=_object; @property(copy) CDUnknownBlockType handler; - (void)cleanup; - (void)_considerFulfilling; - (id)initWithPredicate:(id)arg1 object:(id)arg2 expectation:(id)arg3; @end
352
750
from jasmin.tools.singleton import Singleton from jasmin.tools.stats import Stats class HttpAPIStatistics(Stats): """HTTP API statistics holder""" def __init__(self, api_id): self.api_id = api_id self.init() def init(self): self._stats = { 'created_at': 0, 'request_count': 0, 'last_request_at': 0, 'auth_error_count': 0, 'route_error_count': 0, 'interceptor_error_count': 0, 'interceptor_count': 0, 'throughput_error_count': 0, 'charging_error_count': 0, 'server_error_count': 0, 'success_count': 0, 'last_success_at': 0, } def getStats(self): return self._stats class HttpAPIStatsCollector(metaclass=Singleton): """HTTP API statistics collection holder""" apis = {} def get(self): """Return an HTTP API's stats object or instantiate a new one""" api_id = 'main' if api_id not in self.apis: self.apis[api_id] = HttpAPIStatistics(api_id) return self.apis[api_id]
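# --- Illustrative usage sketch (not part of the module above) ---
# HttpAPIStatsCollector uses the Singleton metaclass and caches the 'main'
# HttpAPIStatistics instance in `apis`, so repeated lookups share one object.
if __name__ == '__main__':
    collector = HttpAPIStatsCollector()
    stats = collector.get()
    print(stats.getStats()['request_count'])
    assert HttpAPIStatsCollector().get() is stats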
541
678
<reponame>bzxy/cydia /** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/iAdCore.framework/iAdCore */ #import <iAdCore/XXUnknownSuperclass.h> @class NSMutableArray; @interface ADResourceDownloader : XXUnknownSuperclass { @private unsigned _maxConcurrency; // 4 = 0x4 NSMutableArray *_downloadQueue; // 8 = 0x8 NSMutableArray *_activeJobs; // 12 = 0xc } @property(retain, nonatomic) NSMutableArray *downloadQueue; // G=0xe909; S=0xe919; @synthesize=_downloadQueue @property(retain, nonatomic) NSMutableArray *activeJobs; // G=0xe93d; S=0xe94d; @synthesize=_activeJobs @property(assign, nonatomic) unsigned maxConcurrency; // G=0xe8f9; S=0xcfcd; @synthesize=_maxConcurrency // declared property setter: - (void)setActiveJobs:(id)jobs; // 0xe94d // declared property getter: - (id)activeJobs; // 0xe93d // declared property setter: - (void)setDownloadQueue:(id)queue; // 0xe919 // declared property getter: - (id)downloadQueue; // 0xe909 // declared property getter: - (unsigned)maxConcurrency; // 0xe8f9 - (void)cancelRequest:(id)request; // 0xe671 - (void)cancelAllRequests; // 0xe2f5 - (void)cancelRequestForURL:(id)url; // 0xdef5 - (void)downloadRequestJob:(id)job; // 0xddf1 - (id)connection:(id)connection willCacheResponse:(id)response; // 0xdded - (void)connection:(id)connection didReceiveAuthenticationChallenge:(id)challenge; // 0xdcdd - (void)connection:(id)connection didFailWithError:(id)error; // 0xdae1 - (void)connectionDidFinishLoading:(id)connection; // 0xd891 - (void)connection:(id)connection didReceiveData:(id)data; // 0xd851 - (void)connection:(id)connection didReceiveResponse:(id)response; // 0xd09d - (id)jobForConnection:(id)connection; // 0xcfed // declared property setter: - (void)setMaxConcurrency:(unsigned)concurrency; // 0xcfcd - (void)_processNextJob; // 0xcc49 - (id)init; // 0xcbad - (void)dealloc; // 0xcb39 @end
710
10,225
<filename>independent-projects/resteasy-reactive/common/processor/src/main/java/org/jboss/resteasy/reactive/common/processor/AdditionalWriters.java package org.jboss.resteasy.reactive.common.processor; public class AdditionalWriters extends AdditionalReaderWriterCommon { }
77
3,477
<gh_stars>1000+ // Copyright Microsoft and Project Verona Contributors. // SPDX-License-Identifier: MIT /** * This benchmark is for testing performance of the scheduling code. * * There are n cowns, each executing m writes to a large statically allocated * array of memory. Each cown performs c behaviours. */ #include "test/log.h" #include "test/opt.h" #include "test/xoroshiro.h" #include "verona.h" #include <chrono> #include <test/harness.h> namespace sn = snmalloc; namespace rt = verona::rt; // Memory to use for workload std::atomic<size_t>* global_array; size_t global_array_size; // Number of writes on each iteration size_t writes; struct LoopCown : public VCown<LoopCown> { size_t count; xoroshiro::p128r32 rng; LoopCown(size_t count, size_t seed) : count(count) { rng.set_state(seed); } void go() { if (count > 0) { count--; schedule_lambda(this, [this]() { work(); go(); }); } else { Cown::release(ThreadAlloc::get(), this); } } void work() { for (size_t i = 0; i < writes; i++) { auto& cell = global_array[rng.next() & (global_array_size - 1)]; auto x = cell.load(std::memory_order_acquire); cell.store(x + 7, std::memory_order_release); } } }; int main(int argc, char** argv) { for (int i = 0; i < argc; i++) { printf(" %s", argv[i]); } printf("\n"); opt::Opt opt(argc, argv); // auto& alloc = sn::ThreadAlloc::get(); const auto cores = opt.is<size_t>("--cores", 4); const auto cowns = (size_t)1 << opt.is<size_t>("--cowns", 8); global_array_size = (size_t)1 << opt.is<size_t>("--size", 22); global_array = new std::atomic<size_t>[global_array_size]; const auto loops = opt.is<size_t>("--loops", 100); writes = opt.is<size_t>("--writes", 0); auto& sched = rt::Scheduler::get(); sched.set_fair(true); for (int l = 0; l < 20; l++) { sched.init(cores); for (size_t i = 0; i < cowns; i++) { auto c = new LoopCown(loops, i + 200); c->go(); } auto start = sn::Aal::tick(); sched.run(); auto end = sn::Aal::tick(); std::cout << "Time:" << (end - start) / (cowns * loops) << std::endl; } delete[] global_array; snmalloc::debug_check_empty<snmalloc::Alloc::StateHandle>(); }
1,086
310
<filename>exercises/concept/boutique-inventory/.meta/config.json { "blurb": "Learn advanced enumeration functionality by preparing your online fashion boutique for its big annual sale.", "authors": ["iHiD"], "files": { "solution": ["boutique_inventory.rb"], "test": ["boutique_inventory_test.rb"], "exemplar": [".meta/exemplar.rb"] } }
122
377
<filename>pyattck/mobile/mitigation.py from .mobileattckobject import MobileAttckObject class MobileAttckMitigation(MobileAttckObject): """Mobile MITRE ATT&CK Mitigation object. A child class of MobileAttckObject Creates objects which have been categorized as potential mitigations Example: You can iterate over a `mitigations` list and access specific properties and relationship properties. The following relationship properties are accessible: 1. techniques 1. To iterate over an `mitigations` list, do the following: .. code-block:: python from pyattck import Attck attck = Attck() for mitigation in attck.mobile.mitigations: print(mitigation.id) print(mitigation.name) print(mitigation.description) # etc. 2. To access relationship properties, do the following: .. code-block:: python from pyattck import Attck attck = Attck() for mitigation in attck.mobile.mitigations: print(mitigation.id) print(mitigation.name) print(mitigation.description) # etc. for technique in mitigation.techniques: print(technique.name) print(technique.description) # etc. """ def __init__(self, mobile_attck_obj = None, **kwargs): """ Creates an MobileAttckMitigation object. The MobileAttckMitigation object is considered a list of mitigations to threats based on the MITRE Mobile ATT&CK Framework Arguments: mobile_attck_obj (json) -- Takes the raw MITRE Mobile ATT&CK Json object AttckObject (dict) -- Takes the MITRE Mobile ATT&CK Json object as a kwargs values """ super(MobileAttckMitigation, self).__init__(**kwargs) self.__mobile_attck_obj = mobile_attck_obj self.old_attack_id = self._set_attribute(kwargs, 'x_mitre_old_attack_id') self.external_reference = self._set_reference(kwargs) self.created_by_ref = self._set_attribute(kwargs, 'created_by_ref') self.version = self._set_attribute(kwargs, 'x_mitre_version') self.stix = self._set_attribute(kwargs, 'id') self.wiki = self._set_wiki(kwargs) self.set_relationships(self.__mobile_attck_obj) @property def techniques(self): """ Accessing known techniques that this mitigation advice is associated with as part of the MITRE Mobile ATT&CK Framework Returns: list: Returns all technique objects as a list that are associated with a mitigation object """ from .technique import MobileAttckTechnique return_list = [] item_dict = {} for item in self.__mobile_attck_obj['objects']: if 'type' in item: if item['type'] == 'attack-pattern': item_dict[item['id']] = item for item in self._RELATIONSHIPS[self.stix]: if item in item_dict: return_list.append(MobileAttckTechnique(mobile_attck_obj=self.__mobile_attck_obj, **item_dict[item])) return return_list
1,518
334
# -*- coding: utf-8 -*- # Copyright (c) 2015 Ericsson AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Openssl wrapper used to generate and sign certificates. This module depends on openssl. """ import os import subprocess import sys import tempfile import time import random import shutil from calvin.utilities import confsort import OpenSSL from cryptography.x509 import load_pem_x509_certificate from cryptography.hazmat.backends import default_backend from calvin.utilities import certificate from calvin.utilities import calvinuuid from calvin.utilities import calvinconfig from calvin.utilities.calvinlogger import get_logger from calvin.utilities.utils import get_home _log = get_logger(__name__) _conf = calvinconfig.get() class CS(): """ A Code Signer (CS) class used to sign actors and applications. The CS is uniquely identified by its organization and common name. If the CS does not exist, it will be created on first reference. """ def __init__(self, organization, commonName, security_dir=None, force=False, readonly=False): self.cs_dir = self.get_cs_credentials_path(organization, security_dir) self.outpath = os.path.join(self.cs_dir, "new_signed_code") self.private = os.path.join(self.cs_dir, "private") self.private_key = os.path.join(self.private, "cs.key") self.out = os.path.join(self.cs_dir, "cs.pem") self.password_file = os.path.join(self.private, "cs_password") self.certificate = os.path.join(self.cs_dir, "cscert.pem") _log.debug("CS init, organization={}, commonName={}".format(organization, commonName)) print "CS init, organization="+ organization+", commonName="+commonName exist = os.path.isdir(self.cs_dir) if not exist and readonly: raise Exception("CS dir does not exist, create Code Signer first") elif exist and not force: print "CS already exists, let's use it" else: _log.debug("Code signer dir does not exist, let's create CS") # Generate keys and CS certificate try: self.new_cs_credentials(organization, commonName, security_dir=security_dir, force=False, readonly=False) except: _log.error("creation of new CS credentials failed") print "Made new code signer" def new_cs_credentials(self, organization, commonName, security_dir=None, force=False, readonly=False): """ Generate keys, files and certificate for the new CS """ _log.debug("new_cs_credentials") os.umask(0077) code_signers_dir = self.get_code_signers_credentials_path(security_dir) if not os.path.isdir(code_signers_dir): try: os.mkdir(code_signers_dir, 0700) except OSError: pass try: os.mkdir(self.cs_dir, 0700) except OSError: pass try: os.mkdir(self.outpath, 0700) except OSError: pass try: os.mkdir(self.private, 0700) except OSError: pass subject = "/O={}/CN={}".format(organization, commonName) # Generate a password for protection of the private key, # store it in the password file password = self.generate_password(20) try: with open(self.password_file,'w') as fd: fd.write(password) except Exception as err: _log.error("Failed to write CS password to file, err={}".format(err)) raise out = os.path.join(self.outpath,
"{}.csr".format(organization)) log = subprocess.Popen(["openssl", "req", "-new", "-x509", "-days", "1825", "-utf8", "-subj", subject, "-passout", "file:{}".format(self.password_file), "-out", self.certificate, "-keyout", self.private_key], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = log.communicate() _log.debug("new_cs_credentials") if log.returncode != 0: _log.error("CS req failed") raise IOError(stderr) return def generate_password(self, length): from os import urandom _log.debug("generate_password, length={}".format(length)) if not isinstance(length, int) or length < 8: raise ValueError("Password must be longer") chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789!#%&/()=?[]{}" return "".join(chars[ord(c) % len(chars)] for c in urandom(length)) def remove_cs(self, cs_name, security_dir=None): """ Remove an existing code signer, uses default security directory if not supplied. """ cs_dir = get_cs_path(security_dir) if os.path.isdir(cs_dir): shutil.rmtree(cs_dir, ignore_errors=True) def get_cs_cert(self): """ Return CA certificate if it exist, if not, raise exception """ return certificate.load_cert(self.certificate) def export_cs_cert(self, path): """ Copy the certificate giving it the name that can be stored in trustStore for verification of signatures. file is the out file """ return certificate.export_cert(self.certificate, path) def sign_file(self, file, dir=None): """ Sign an actor, component or application. Store the signature in <file>.sign.<hash-cert> File is the file to be signed. Equivalent of: openssl dgst -sha256 -sign "$private_key" -out "$file.sign.<cert-hash>" -passin file:$private_dir/ca_password "$file" """ _log.debug("sign_file: file={}".format(file)) try: certificate_hash = certificate.cert_hash(certpath=self.certificate) except: _log.exception("Failed to get certificate hash") raise Exception("Failed to get certificate hash") sign_file_name = file + ".sign." 
+ certificate_hash if dir: sign_file = os.path.join(dir, sign_file_name) else: sign_file = sign_file_name print "signed file name="+sign_file log = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", self.private_key, "-passin", "file:" + self.password_file, "-out", sign_file, file], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = log.communicate() if log.returncode != 0: raise IOError(stderr) with open(sign_file, 'rt') as f: signature = f.read() with open(file, 'rt') as f: content= f.read() with open(self.certificate, 'rt') as f: trusted_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read()) try: # Verify signature OpenSSL.crypto.verify(trusted_cert, signature, content, 'sha256') _log.debug("verify_signature_content: signature correct") except Exception as e: _log.error("OpenSSL verification error", exc_info=True) return sign_file def verify_private_key_exist(self): """Return the node's private key""" return os.path.isfile(self.private_key) def get_cs_public_key(self): """Return the public key from certificate""" return certificate.get_public_key(self.certificate) def get_cs_credentials_path(self, name, security_dir=None): """Return the node's own certificate name without file extension""" _log.debug("get_cs_credentials_path") return os.path.join(self.get_code_signers_credentials_path(security_dir), name) def get_code_signers_credentials_path(self, security_dir=None): """Return the node's own certificate name without file extension""" _log.debug("get_cs_credentials_path") return os.path.join(certificate.get_security_credentials_path(security_dir), "code_signers")
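# --- Illustrative usage sketch (organization, names and paths are hypothetical) ---
# Create (or reuse) a code signer, export its certificate for a trust store,
# and sign an actor file; sign_file() writes <file>.sign.<cert-hash>.
if __name__ == '__main__':
    cs = CS("org.testexample", "signer")
    cs.export_cs_cert("/tmp/truststore")
    signature_file = cs.sign_file("actor.py")
    print "signature written to " + signature_file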
4,320
375
<reponame>matthew-reynolds/ros_control /////////////////////////////////////////////////////////////////////////////// // Copyright (C) 2012, hiDOF INC. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of hiDOF, Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////////////// /// \author <NAME> #pragma once #include <cassert> #include <string> #include <hardware_interface/internal/hardware_resource_manager.h> #include <hardware_interface/posvel_command_interface.h> namespace hardware_interface { /** \brief A handle used to read and command a single joint. */ class PosVelAccJointHandle : public PosVelJointHandle { public: PosVelAccJointHandle() = default; /** * \param js This joint's state handle * \param cmd_pos A pointer to the storage for this joint's output command position * \param cmd_vel A pointer to the storage for this joint's output command velocity * \param cmd_acc A pointer to the storage for this joint's output command acceleration */ PosVelAccJointHandle(const JointStateHandle& js, double* cmd_pos, double* cmd_vel, double* cmd_acc) : PosVelJointHandle(js, cmd_pos, cmd_vel), cmd_acc_(cmd_acc) { if (!cmd_acc) { throw HardwareInterfaceException("Cannot create handle '" + js.getName() + "'. Command acceleration data pointer is null."); } } void setCommand(double cmd_pos, double cmd_vel, double cmd_acc) { setCommandPosition(cmd_pos); setCommandVelocity(cmd_vel); setCommandAcceleration(cmd_acc); } void setCommandAcceleration(double cmd_acc) {assert(cmd_acc_); *cmd_acc_ = cmd_acc;} double getCommandAcceleration() const {assert(cmd_acc_); return *cmd_acc_;} private: double* cmd_acc_ = {nullptr}; }; /** \brief Hardware interface to support commanding an array of joints. * * This \ref HardwareInterface supports commanding joints by position, velocity & * acceleration together in one command. * * \note Getting a joint handle through the getHandle() method \e will claim that resource. * */ class PosVelAccJointInterface : public HardwareResourceManager<PosVelAccJointHandle, ClaimResources> {}; }
1,034
10,225
package io.quarkus.reactivemessaging.http.runtime.serializers; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; import org.jboss.logging.Logger; /** * a base superclass for a SerializerFactory that is generated in build time */ public abstract class SerializerFactoryBase { private static final Logger log = Logger.getLogger(SerializerFactoryBase.class); private final Map<String, Serializer<?>> serializersByClassName = new HashMap<>(); private final List<Serializer<?>> predefinedSerializers = new ArrayList<>(); protected SerializerFactoryBase() { predefinedSerializers.add(new JsonObjectSerializer()); predefinedSerializers.add(new JsonArraySerializer()); predefinedSerializers.add(new StringSerializer()); predefinedSerializers.add(new BufferSerializer()); predefinedSerializers.add(new ObjectSerializer()); predefinedSerializers.add(new CollectionSerializer()); predefinedSerializers.add(new NumberSerializer()); predefinedSerializers.sort(Comparator.comparingInt(Serializer::getPriority)); Collections.reverse(predefinedSerializers); initAdditionalSerializers(); } /** * method that initializes additional serializers (used by user's config). * Implemented in the generated subclass */ protected abstract void initAdditionalSerializers(); /** * get a {@link Serializer} of a given (class) name or for a given payload type * * @param name name of the serializer * @param payload payload to serialize * @param <T> type of the payload * @return serializer */ public <T> Serializer<T> getSerializer(String name, T payload) { if (payload == null) { throw new IllegalArgumentException("Payload cannot be null"); } if (name != null) { @SuppressWarnings("unchecked") Serializer<T> serializer = (Serializer<T>) serializersByClassName.get(name); if (serializer == null) { throw new IllegalArgumentException("No serializer class found for name: " + name); } if (serializer.handles(payload)) { return serializer; } else { log.warnf("Specified serializer (%s) does not handle the payload type %s", name, payload.getClass()); } } for (Serializer<?> serializer : predefinedSerializers) { if (serializer.handles(payload)) { //noinspection unchecked return (Serializer<T>) serializer; } } throw new IllegalArgumentException("No predefined serializer found matching class: " + payload.getClass()); } @SuppressWarnings("unused") // used by a generated subclass public void addSerializer(String className, Serializer<?> serializer) { serializersByClassName.put(className, serializer); } }
1,128
348
{"nom":"Bligny","circ":"2ème circonscription","dpt":"Marne","inscrits":100,"abs":45,"votants":55,"blancs":11,"nuls":1,"exp":43,"res":[{"nuance":"LR","nom":"Mme <NAME>","voix":22},{"nuance":"REM","nom":"Mme <NAME>","voix":21}]}
92
7,936
{"states":[{"state":{"state_id":"AN","state_name":"Andaman and Nicobar Island (UT)"}},{"state":{"state_id":"AP","state_name":"Andhra Pradesh"}},{"state":{"state_id":"AR","state_name":"Arunachal Pradesh"}},{"state":{"state_id":"AS","state_name":"Assam"}},{"state":{"state_id":"BR","state_name":"Bihar"}},{"state":{"state_id":"CH","state_name":"Chandigarh (UT)"}},{"state":{"state_id":"CG","state_name":"Chhattisgarh"}},{"state":{"state_id":"DN","state_name":"Dadra and Nagar Haveli (UT)"}},{"state":{"state_id":"DD","state_name":"Daman and Diu (UT)"}},{"state":{"state_id":"DL","state_name":"Delhi (NCT)"}},{"state":{"state_id":"GA","state_name":"Goa"}},{"state":{"state_id":"GJ","state_name":"Gujarat"}},{"state":{"state_id":"HR","state_name":"Haryana"}},{"state":{"state_id":"HP","state_name":"Himachal Pradesh"}},{"state":{"state_id":"JK","state_name":"Jammu and Kashmir"}},{"state":{"state_id":"JH","state_name":"Jharkhand"}},{"state":{"state_id":"KA","state_name":"Karnataka"}},{"state":{"state_id":"KL","state_name":"Kerala"}},{"state":{"state_id":"LD","state_name":"Lakshadweep (UT)"}},{"state":{"state_id":"MP","state_name":"Madhya Pradesh"}},{"state":{"state_id":"MH","state_name":"Maharashtra"}},{"state":{"state_id":"MN","state_name":"Manipur"}},{"state":{"state_id":"ML","state_name":"Meghalaya"}},{"state":{"state_id":"MZ","state_name":"Mizoram"}},{"state":{"state_id":"NL","state_name":"Nagaland"}},{"state":{"state_id":"OR","state_name":"Odisha"}},{"state":{"state_id":"PY","state_name":"Puducherry (UT)"}},{"state":{"state_id":"PB","state_name":"Punjab"}},{"state":{"state_id":"RJ","state_name":"Rajasthan"}},{"state":{"state_id":"SK","state_name":"Sikkim"}},{"state":{"state_id":"TN","state_name":"Tamil Nadu"}},{"state":{"state_id":"TG","state_name":"Telangana"}},{"state":{"state_id":"TR","state_name":"Tripura"}},{"state":{"state_id":"UK","state_name":"Uttarakhand"}},{"state":{"state_id":"UP","state_name":"Uttar Pradesh"}},{"state":{"state_id":"WB","state_name":"West Bengal"}}]}
669
665
#import <Cocoa/Cocoa.h> #import "xadmaster.h" #define XADResourceForkData @"XADResourceForkData" #define XADFinderFlags @"XADFinderFlags" typedef enum { XADAbort,XADRetry,XADSkip,XADOverwrite,XADRename, } XADAction; typedef xadERROR XADError; @class XADArchivePipe,UniversalDetector; @interface XADArchive:NSObject { NSString *filename; NSArray *volumes; NSData *memdata; XADArchive *parentarchive; XADArchivePipe *pipe; id delegate; NSStringEncoding name_encoding; NSString *password; NSTimeInterval update_interval; double update_time; struct xadMasterBase *xmb; struct xadArchiveInfo *archive; struct Hook progresshook; NSMutableArray *fileinfos; NSMutableDictionary *dittoforks; NSMutableArray *writeperms; int currentry; xadSize extractsize,totalsize; NSString *immediatedestination; BOOL immediatefailed; UniversalDetector *detector; NSStringEncoding detected_encoding; float detector_confidence; XADError lasterror; } -(id)init; -(id)initWithFile:(NSString *)file; -(id)initWithFile:(NSString *)file error:(XADError *)error; -(id)initWithFile:(NSString *)file delegate:(id)del error:(XADError *)error; -(id)initWithData:(NSData *)data; -(id)initWithData:(NSData *)data error:(XADError *)error; -(id)initWithArchive:(XADArchive *)archive entry:(int)n; -(id)initWithArchive:(XADArchive *)archive entry:(int)n error:(XADError *)error; -(id)initWithArchive:(XADArchive *)otherarchive entry:(int)n immediateExtractionTo:(NSString *)destination delegate:(id)del encoding:(NSStringEncoding)encoding error:(XADError *)error; -(void)dealloc; -(BOOL)_finishInit:(xadTAGPTR)tags error:(XADError *)error; -(xadUINT32)_newEntryCallback:(struct xadProgressInfo *)info; -(NSString *)filename; -(NSArray *)allFilenames; -(NSString *)formatName; -(BOOL)isEncrypted; -(BOOL)isCorrupted; -(BOOL)immediateExtractionFailed; -(int)numberOfEntries; -(NSString *)nameOfEntry:(int)n; -(int)sizeOfEntry:(int)n; -(int)compressedSizeOfEntry:(int)n; -(BOOL)entryIsDirectory:(int)n; -(BOOL)entryIsLink:(int)n; -(BOOL)entryIsEncrypted:(int)n; -(NSDictionary *)attributesOfEntry:(int)n; -(NSDictionary *)attributesOfEntry:(int)n withResourceFork:(BOOL)resfork; -(NSData *)contentsOfEntry:(int)n; -(NSData *)_contentsOfFileInfo:(struct xadFileInfo *)info; -(BOOL)_entryIsLonelyResourceFork:(int)n; -(int)_entryIndexOfName:(NSString *)name; -(const char *)_undecodedNameOfEntry:(int)n; -(BOOL)extractTo:(NSString *)destination; -(BOOL)extractEntries:(NSIndexSet *)entries to:(NSString *)destination; -(BOOL)extractEntry:(int)n to:(NSString *)destination; -(BOOL)extractEntry:(int)n to:(NSString *)destination overrideWritePermissions:(BOOL)override; -(BOOL)extractArchiveEntry:(int)n to:(NSString *)destination; -(void)fixWritePermissions; -(BOOL)_extractEntry:(int)n as:(NSString *)destfile; -(BOOL)_extractFileEntry:(int)n as:(NSString *)destfile; -(BOOL)_extractDirectoryEntry:(int)n as:(NSString *)destfile; -(BOOL)_extractLinkEntry:(int)n as:(NSString *)destfile; -(BOOL)_ensureDirectoryExists:(NSString *)directory; -(BOOL)_changeAllAttributes:(NSDictionary *)attrs atPath:(NSString *)path overrideWritePermissions:(BOOL)override; -(NSString *)commonTopDirectory; -(void)setDelegate:(id)delegate; -(id)delegate; -(NSStringEncoding)nameEncoding; -(void)setNameEncoding:(NSStringEncoding)encoding; -(NSStringEncoding)encodingForString:(const char *)cstr; -(BOOL)_stringIsASCII:(const char *)cstr; -(void)_runDetectorOn:(const char *)cstr; -(NSString *)password; -(void)setPassword:(NSString *)newpassword; -(const char *)_encodedPassword; 
-(void)setProgressInterval:(NSTimeInterval)interval; -(xadUINT32)_progressCallback:(struct xadProgressInfo *)info; -(void)_reportInputPosition; -(BOOL)_canHaveDittoResourceForks; -(BOOL)_fileInfoIsDittoResourceFork:(struct xadFileInfo *)info; -(NSString *)_nameOfDataForkForDittoResourceFork:(struct xadFileInfo *)info; -(void)_parseDittoResourceFork:(struct xadFileInfo *)info intoAttributes:(NSMutableDictionary *)attrs; -(XADError)lastError; -(void)clearLastError; -(NSString *)describeLastError; -(NSString *)describeError:(XADError)error; -(struct xadMasterBase *)xadMasterBase; -(struct xadArchiveInfo *)xadArchiveInfo; -(struct xadFileInfo *)xadFileInfoForEntry:(int)n; -(NSString *)description; +(XADArchive *)archiveForFile:(NSString *)filename; +(XADArchive *)recursiveArchiveForFile:(NSString *)filename; +(NSArray *)volumesForFile:(NSString *)filename; @end @interface NSObject (XADArchiveDelegate) -(NSStringEncoding)archive:(XADArchive *)archive encodingForName:(const char *)bytes guess:(NSStringEncoding)guess confidence:(float)confidence; -(XADAction)archive:(XADArchive *)archive nameDecodingDidFailForEntry:(int)n bytes:(const char *)bytes; -(BOOL)archiveExtractionShouldStop:(XADArchive *)archive; -(BOOL)archive:(XADArchive *)archive shouldCreateDirectory:(NSString *)directory; -(XADAction)archive:(XADArchive *)archive entry:(int)n collidesWithFile:(NSString *)file newFilename:(NSString **)newname; -(XADAction)archive:(XADArchive *)archive entry:(int)n collidesWithDirectory:(NSString *)file newFilename:(NSString **)newname; -(XADAction)archive:(XADArchive *)archive creatingDirectoryDidFailForEntry:(int)n; -(void)archive:(XADArchive *)archive extractionOfEntryWillStart:(int)n; -(void)archive:(XADArchive *)archive extractionProgressForEntry:(int)n bytes:(xadSize)bytes of:(xadSize)total; -(void)archive:(XADArchive *)archive extractionOfEntryDidSucceed:(int)n; -(XADAction)archive:(XADArchive *)archive extractionOfEntryDidFail:(int)n error:(XADError)error; -(XADAction)archive:(XADArchive *)archive extractionOfResourceForkForEntryDidFail:(int)n error:(XADError)error; -(void)archive:(XADArchive *)archive extractionProgressBytes:(xadSize)bytes of:(xadSize)total; -(void)archive:(XADArchive *)archive extractionProgressFiles:(int)files of:(int)total; -(void)archive:(XADArchive *)archive immediateExtractionInputProgressBytes:(xadSize)bytes of:(xadSize)total; @end
2,063
5,169
<gh_stars>1000+ { "name": "HelloTrello", "version": "0.3.0", "summary": "A Swift library to interact with the Trello API", "description": "HelloTrello is a simple Swift API to interact with the Trello API. It uses auth-tokens and currenly only supports basic GET requests.", "homepage": "https://github.com/livio/HelloTrello", "license": "BSD 3-Clause", "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/livio/HelloTrello.git", "tag": "0.3.0" }, "platforms": { "ios": "10.0", "tvos": "10.0", "osx": "10.12" }, "requires_arc": true, "source_files": "Pod/Classes/**/*", "dependencies": { "Alamofire": [ "~> 4.6" ], "AlamofireImage": [ "~> 3.3" ] } }
334
6,098
<reponame>ahmedengu/h2o-3 import water.udf.CFunc2 as Func class TestCFunc2(Func): """ Compute sum of actual + predict """ def apply(self, rowActual, rowPredict): return sum(rowActual.readDoubles()) + sum(rowPredict.readDoubles())
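# --- Illustrative sketch (the stub row class is hypothetical, not an h2o type) ---
# Objects handed to apply() only need the readDoubles() accessor used above;
# inside H2O the runtime supplies the real row readers.
class _StubRow(object):
    def __init__(self, values):
        self._values = values

    def readDoubles(self):
        return self._values


if __name__ == '__main__':
    func = TestCFunc2()
    print(func.apply(_StubRow([1.0, 2.0]), _StubRow([3.0])))  # 6.0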
107
2,838
<gh_stars>1000+ /** * Copyright (c) Glow Contributors. See CONTRIBUTORS file. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "glow/Importer/ProtobufLoader.h" #include "llvm/Support/CommandLine.h" #include <string> namespace glow { llvm::cl::OptionCategory loaderOptCat("Model Loader Options"); static llvm::cl::opt<bool> isConstFoldLoaderOps( "const-fold-ops", llvm::cl::desc( "Performs constant folding on ONNX and Caffe Operators while loading."), llvm::cl::init(true), llvm::cl::cat(loaderOptCat)); bool isArrayConstant(llvm::ArrayRef<size_t> a) { for (size_t i = 1; i < a.size(); i++) if (a[0] != a[i]) return false; return true; } void setConstantFoldLoaderOpsFlag(bool flag) { isConstFoldLoaderOps = flag; } bool getConstantFoldLoaderOpsFlag() { return isConstFoldLoaderOps; } bool ProtobufLoader::isConstantFoldable(llvm::ArrayRef<NodeValue> inputs, std::string typeName) const { int numInputs = inputs.size(); if (!getConstantFoldLoaderOpsFlag()) { return false; } // foldUnsupportedTypes: List of typenames unsupported for folding. std::string foldUnsupportedTypes[] = {"Constant", "Loop", "If"}; std::string *findType = std::find(std::begin(foldUnsupportedTypes), std::end(foldUnsupportedTypes), typeName); // Early exit if folding is not supported for current operator. if (findType != std::end(foldUnsupportedTypes)) { return false; } // If all the inputs to the operator are constant this op can be folded. for (int i = 0; i < numInputs; i++) { if (inputs[i].getNode()->getKind() != Kinded::Kind::ConstantKind) { return false; } } return true; } Placeholder * ProtobufLoader::getStaticPlaceholderByNameOrNull(llvm::StringRef name) const { auto it = nodeValueByName_.find(name); if (it == nodeValueByName_.end()) { return nullptr; } auto *res = llvm::dyn_cast<Placeholder>(it->second.getNode()); return (res && res->isStatic()) ? res : nullptr; } Constant *ProtobufLoader::getConstantByNameOrNull(llvm::StringRef name) const { auto it = nodeValueByName_.find(name); if (it == nodeValueByName_.end()) { return nullptr; } auto *res = llvm::dyn_cast<Constant>(it->second.getNode()); return res ? 
res : nullptr; } Expected<Constant *> ProtobufLoader::getConstantByName(llvm::StringRef name) const { auto *ptr = getConstantByNameOrNull(name); RETURN_ERR_IF_NOT( ptr, strFormat("could not find constant with name %s", name.data())); return ptr; } bool ProtobufLoader::hasConstantByName(llvm::StringRef name) const { return getConstantByNameOrNull(name) != nullptr; } Expected<Placeholder *> ProtobufLoader::getSingleOutput() const { RETURN_ERR_IF_NOT(outputVarsByName_.size() == 1, "There must be only one output."); return outputVarsByName_.begin()->second; } Expected<Placeholder *> ProtobufLoader::getSingleInput() const { RETURN_ERR_IF_NOT(inputVarsByName_.size() == 1, "There must be only one input."); return inputVarsByName_.begin()->second; } Expected<Placeholder *> ProtobufLoader::getOutputByName(llvm::StringRef name) const { auto it = outputVarsByName_.find(name); RETURN_ERR_IF_NOT( it != outputVarsByName_.end(), llvm::Twine("No external output Variable was registered with name ", name) .str()); return it->second; } Expected<Placeholder *> ProtobufLoader::getInputByName(llvm::StringRef name) const { auto it = inputVarsByName_.find(name); RETURN_ERR_IF_NOT( it != inputVarsByName_.end(), llvm::Twine("No external input Variable was registered with name ", name) .str()); return it->second; } NodeValue ProtobufLoader::getNodeValueByNameOrNullNodeValue(llvm::StringRef name, bool ignoreSrcFun) { auto it = nodeValueByName_.find(name); if (it == nodeValueByName_.end()) { return NodeValue(nullptr); } // Always return the NV of a storage Node since Storage lives in the Module // and is accessible to any Node. NodeValue NV = it->second; if (llvm::isa<Storage>(NV)) { return NV; } // Check if the current Function G_ we are loading into is the same as the // Function of the NV we found; if so then return it. Function *srcF = NV.getNode()->getParent(); if (srcF == G_ || ignoreSrcFun) { return NV; } // Otherwise we must be looking up a NV from a different Function in the // Module, so look for an intermediate Placeholder linking the two if it // exists, or otherwise create one and remember it. assert(partNameToFun_.size() > 0 && "Must be loading a pre-partitioned model."); auto itPH = intermediatePHsByName_.find(name); Placeholder *intermedPH = nullptr; // Create the intermediate PH and SaveNode if it does not yet exist. Note that // we store these intermediate PHs separately from nodeValueByName_ because we // want future users from the same Function as the NV to still use the Node // directly through nodeValueByName_. if (itPH == intermediatePHsByName_.end()) { auto *save = srcF->createSave("tmp_" + NV.getNode()->getName().str(), NV); intermedPH = save->getPlaceholder(); intermediatePHsByName_[name] = intermedPH; } else { intermedPH = itPH->second; } return intermedPH->getOutput(); } Expected<NodeValue> ProtobufLoader::getNodeValueByName(llvm::StringRef name, bool ignoreSrcFun) { RETURN_ERR_IF_NOT(hasNodeByName(name), llvm::Twine("No node under name ", name).str()); auto node = getNodeValueByNameOrNullNodeValue(name, ignoreSrcFun); RETURN_ERR_IF_NOT(node.getNode(), "Null is under that name??"); return node; } Error ProtobufLoader::createAndRegisterConstant(llvm::StringRef name, Tensor &&tensor, const std::string &layout) { auto it = nodeValueByName_.find(name); if (it != nodeValueByName_.end()) { if (llvm::dyn_cast<Placeholder>(it->second.getNode())) { // Placeholders take precedence over Constants. 
return Error::success(); } } // Note: We do not support training from models loaded from protos, so // trainable is always set to false here. Constant *node = mod_.createConstant(name, std::move(tensor), layout); nodeValueByName_[name] = node->getOutput(); return Error::success(); } void ProtobufLoader::deleteUnusedConstants() { std::vector<std::string> nodeValuesToRemove; // Note that it's possible a constant is referred by more than one names // (e.g., via Identity operator). Therefore, we maintain a set of constants to // erase separately from the list for names. std::unordered_set<Constant *> constantToRemove; for (auto &kv : nodeValueByName_) { auto *node = kv.second.getNode(); if (auto *c = llvm::dyn_cast<Constant>(node)) { if (!c->hasUsers()) { nodeValuesToRemove.push_back(kv.getKey().str()); constantToRemove.insert(c); } } } for (auto &name : nodeValuesToRemove) { auto it = nodeValueByName_.find(name); DCHECK(llvm::isa<Constant>(it->second.getNode())) << "NodeValue with name " << name << " was expected to have been a Constant"; nodeValueByName_.erase(it); } for (auto *c : constantToRemove) { G_->getParent()->eraseConstant(c); } } Expected<Placeholder *> ProtobufLoader::createAndRegisterPlaceholder(llvm::StringRef name, TypeRef T, bool isStatic, bool isTrainable, const std::string &layout) { RETURN_ERR_IF_NOT( !hasNodeByName(name), llvm::Twine("Creating an already existing node ", name).str()); RETURN_ERR_IF_NOT(!mod_.hasStorageName(name), strFormat("A Placeholder was already registered by name %s", name.data())); Placeholder *node = mod_.createPlaceholder(T, name, isTrainable, layout); node->setStatic(isStatic); nodeValueByName_[name] = node->getOutput(); return node; } bool ProtobufLoader::hasNodeByName(llvm::StringRef name) const { return nodeValueByName_.find(name) != nodeValueByName_.end(); } ProtobufLoader::ProtobufLoader(llvm::ArrayRef<const char *> tensorNames, llvm::ArrayRef<TypeRef> types, Module &mod, Error *errPtr, bool loadIntoExistingModule, OriginNameToTQPMap *originNameToTQPMap, bool loadUniquedDummyQParams, bool replaceDummyTQPs, bool zeroScaleFP16Clip, bool clipQuantRangeToFP16) : G_(nullptr), mod_(mod), loadIntoExistingModule_(loadIntoExistingModule), originNameToTQPMap_(originNameToTQPMap), loadUniquedDummyQParams_(loadUniquedDummyQParams), replaceDummyTQPs_(replaceDummyTQPs), zeroScaleFP16Clip_(zeroScaleFP16Clip), clipQuantRangeToFP16_(clipQuantRangeToFP16) { setupLoader(tensorNames, types, errPtr); } ProtobufLoader::ProtobufLoader(llvm::ArrayRef<const char *> tensorNames, llvm::ArrayRef<TypeRef> types, Function *F, Error *errPtr, bool loadIntoExistingModule, OriginNameToTQPMap *originNameToTQPMap, bool loadUniquedDummyQParams, bool replaceDummyTQPs, bool zeroScaleFP16Clip, bool clipQuantRangeToFP16) : G_(F), mod_(*F->getParent()), loadIntoExistingModule_(loadIntoExistingModule), originNameToTQPMap_(originNameToTQPMap), loadUniquedDummyQParams_(loadUniquedDummyQParams), replaceDummyTQPs_(replaceDummyTQPs), zeroScaleFP16Clip_(zeroScaleFP16Clip), clipQuantRangeToFP16_(clipQuantRangeToFP16) { setupLoader(tensorNames, types, errPtr); } void ProtobufLoader::setupLoader(llvm::ArrayRef<const char *> tensorNames, llvm::ArrayRef<TypeRef> types, Error *errPtr) { // Verify that the version of the library that we linked against is // compatible with the version of the headers we compiled against. GOOGLE_PROTOBUF_VERIFY_VERSION; // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } // Use the global flag as default. 
This may be overridden by instantiations of // the loader later on. constFoldInLoader_ = getConstantFoldLoaderOpsFlag(); // Lambda to setup the ProtobufLoader and return any Errors that were // raised. auto setup = [&]() -> Error { RETURN_ERR_IF_NOT(tensorNames.size() == types.size(), "Invalid initialization list"); for (size_t i = 0, e = tensorNames.size(); i < e; i++) { RETURN_ERR_IF_NOT(!hasNodeByName(tensorNames[i]), "Input names have duplicate"); TypeRef T = types[i]; if (T->isQuantizedType() && !T->isFusedQuantizedType()) { RETURN_ERR_IF_NOT(!clipQuantRangeToFP16_, strFormat("Do not support clipQuantRangeToFP16 with " "unfused quantized input Placeholders: %s", tensorNames[i])); // Note: Never shift here, because these are the types that were already // imported/defined based on Glow. ASSIGN_VALUE_OR_RETURN_ERR( T, loadQuantTy(tensorNames[i], T->getElementType(), T->dims(), T->getScale(), T->getOffset(), /* shiftUInt8ToInt8 */ false)); } Placeholder *placeholder; ASSIGN_VALUE_OR_RETURN_ERR( placeholder, createAndRegisterPlaceholder(tensorNames[i], T)); inputVarsByName_.try_emplace(tensorNames[i], placeholder); } return Error::success(); }; if (errPtr) { *errPtr = setup(); } else { EXIT_ON_ERR(setup()); } } Expected<TensorQuantizationParams> ProtobufLoader::getUpdatedTQP(int32_t uniqueOffsetIdx) { RETURN_ERR_IF_NOT(replaceDummyTQPs_, "replaceDummyTQPs_ was not enabled"); RETURN_ERR_IF_NOT( uniqueOffsetIdx < int32_t(updatedTQPs_.size()), strFormat("Unexpected size of updated TQPs %lu vs. dummy offset %d", updatedTQPs_.size(), uniqueOffsetIdx)); return updatedTQPs_[uniqueOffsetIdx]; } Expected<TypeRef> ProtobufLoader::loadQuantTy(const std::string &name, ElemKind k, llvm::ArrayRef<dim_t> dims, float scale, int32_t offset, bool shiftUInt8ToInt8, bool skipClipQuantRangeToFP16) { // If we have Int8QTy, we may have loaded as UInt8, and so will need to shift // to align to Glow's Int8QTy. if (k == ElemKind::Int8QTy && shiftUInt8ToInt8) { offset -= UINT8_TO_INT8_SHIFT; } // If we don't have a map to track dummy unique offsets to loader names, then // just load as normal with the actual scale/offset we loaded. if (!loadUniquedDummyQParams_) { // If clipping qparams to fp16 range then do so here. if (clipQuantRangeToFP16_ && !skipClipQuantRangeToFP16) { const auto qMinMax = getQuantizedValueRange(scale, offset, k); const float newMin = std::max(qMinMax.first, kMinFP16); const float newMax = std::min(qMinMax.second, kMaxFP16); const TensorQuantizationParams newQParams = chooseQuantizationParams( {newMin, newMax}, quantization::Asymmetric, k); scale = newQParams.scale; offset = newQParams.offset; } // If we are clipping qparam scales below the kMinScaleFP16 threshold to // kMinScaleFP16 then do so here. if (zeroScaleFP16Clip_ && scale < kMinScaleFP16) { scale = kMinScaleFP16; } if (originNameToTQPMap_) { bool inserted = originNameToTQPMap_ ->emplace(name, TensorQuantizationParams{scale, offset}) .second; RETURN_ERR_IF_NOT(inserted, "Already inserted TQP for " + name); } return mod_.uniqueType(k, dims, scale, offset); } RETURN_ERR_IF_NOT(originNameToTQPMap_, "Must have valid originNameToTQPMap_ when loading " "uniqued dummy qparams."); // We use dummyScale to represent a dummy scale/offset pair. Make sure the // original model did not have dummyScale, since we will use it later on to // verify all qparams are now dummies. RETURN_ERR_IF_NOT(scale != dummyScale, "Found dummy scale for " + name); // For uniqued scale/offset, ignore the actual loaded values. 
Instead use // dummyScale to signal these quant params are dummies, and then a uniqued // incremented offset to represent this unique quant param pair. Save the name // of the C2 edge that we loaded to use these quant params in the cctx so we // can ue it in the future. The index the name is at represents which unique // index it is mapped to. RETURN_ERR_IF_NOT(int32_t(originNameToTQPMap_->size()) == currUniqueOffset_, "Unexpected size encountered for qparam origin tracking"); const int32_t thisUniqueOffset = currUniqueOffset_++; bool inserted = originNameToTQPMap_ ->emplace(name, TensorQuantizationParams{dummyScale, thisUniqueOffset}) .second; RETURN_ERR_IF_NOT(inserted, "Already inserted TQP for " + name); return mod_.uniqueType(k, dims, dummyScale, thisUniqueOffset); } }; // namespace glow
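As a rough illustration of the fp16 clipping path described in the loadQuantTy comments above, the sketch below (Python, not Glow's actual code) computes the real-valued range covered by an int8 scale/offset pair, clamps it to the fp16-representable range, and re-derives asymmetric quantization parameters. The constants and the parameter-choosing details are simplifying assumptions.

# Illustrative sketch only -- not Glow's implementation.
KMAX_FP16 = 65504.0          # largest finite fp16 value
KMIN_FP16 = -65504.0
KMIN_SCALE_FP16 = 1e-8       # assumed stand-in for Glow's kMinScaleFP16 threshold

def quantized_value_range(scale, offset, qmin=-128, qmax=127):
    # Real-valued interval representable by the given int8 scale/offset.
    return scale * (qmin - offset), scale * (qmax - offset)

def choose_asymmetric_qparams(rmin, rmax, qmin=-128, qmax=127):
    # Re-derive scale/offset so that [rmin, rmax] maps onto [qmin, qmax].
    rmin, rmax = min(rmin, 0.0), max(rmax, 0.0)
    scale = (rmax - rmin) / (qmax - qmin) or 1.0
    offset = int(round(qmin - rmin / scale))
    return scale, offset

def clip_qparams_to_fp16(scale, offset):
    rmin, rmax = quantized_value_range(scale, offset)
    rmin, rmax = max(rmin, KMIN_FP16), min(rmax, KMAX_FP16)
    new_scale, new_offset = choose_asymmetric_qparams(rmin, rmax)
    if new_scale < KMIN_SCALE_FP16:      # mirrors the zeroScaleFP16Clip_ branch
        new_scale = KMIN_SCALE_FP16
    return new_scale, new_offset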
<gh_stars>100-1000 /* * Copyright (C) <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "libcgc.h" #include "cgc_account.h" #include "cgc_option.h" uint32_t ORDER_ID = 1; option_holding_t * cgc_match_holding(uint32_t acct_id, option_order_t *order){ for(int i =0; i < MAX_HOLDINGS; i++){ option_holding_t *h = &(cgc_ACCOUNTS[acct_id].holdings[i]); int mcp = cgc_memcmp(order->symbol, h->symbol, SYM_SIZE); if(mcp == 0) return h; } return NULL; } option_holding_t *cgc_add_holding(uint32_t acct_id, option_order_t *order){ option_holding_t *n = cgc_match_holding(acct_id, order); // assume everyone has a bit of every holding if(n == NULL) cgc__terminate(77); // dont rollover just keep it as the max if(n->qty != 0xFFFFFFFF) n->qty = n->qty + order->qty; return n; } option_holding_t *cgc_dec_holding(uint32_t acct_id, option_order_t *order){ option_holding_t *n = cgc_match_holding(acct_id, order); // assume everyone has a bit of every holding if(n == NULL) cgc__terminate(55); if(n->qty > order->qty) n->qty = n->qty - order->qty; return n; } OP_ERR cgc_fill_order(uint32_t acct_id, option_order_t *order, orderbook_order_t *matched_order){ // this also assumes that the order account has the balance option_order_t *match = &(matched_order->contract); float purchase_price = order->qty * match->price; // decrement the account balance on buy side cgc_ACCOUNTS[acct_id].balance = cgc_ACCOUNTS[acct_id].balance - purchase_price; cgc_ACCOUNTS[matched_order->acct_id].balance = cgc_ACCOUNTS[matched_order->acct_id].balance + purchase_price; // this is the sell order option_holding_t *debit_holding = cgc_match_holding(matched_order->acct_id, order); if(debit_holding == NULL) cgc__terminate(88); cgc_dec_holding(matched_order->acct_id, order); // this is buy order option_holding_t * credit_holding = cgc_match_holding(acct_id, order); if(credit_holding == NULL) cgc__terminate(88); cgc_add_holding(acct_id, order); if(order->qty > match->qty) cgc__terminate(102); match->qty = match->qty - order->qty; if(match->qty == 0){ match->symbol[0] = 0x00; match->price = 0; return ORDERFILL; } if(credit_holding->qty == 0xFFFFFFFF) return QTY_OVERFLOW; return OK; } int cgc_match_symbol(option_order_t *l, option_order_t *r){ return cgc_memcmp(l->symbol, r->symbol, SYM_SIZE); } float cgc_get_current_ask(char * sym){ float price = 0.0; uint32_t low_oid = 0; for(int i = 0; i < NUM_ORDERS; i++){ if(ORDERBOOK[i].direction == SELL){ int mc = cgc_memcmp(ORDERBOOK[i].contract.symbol, sym, SYM_SIZE); if(mc == 0 && 
ORDERBOOK[i].contract.qty > 0){ if(ORDERBOOK[i].order_id < low_oid || low_oid == 0){ low_oid = ORDERBOOK[i].order_id; price = ORDERBOOK[i].contract.price; } } } } return price; } orderbook_order_t * cgc_find_sell_order(option_order_t *order){ uint32_t low_oid = 0; orderbook_order_t * o = NULL; for(int i =0; i < NUM_ORDERS; ++i){ option_order_t *potential = &(ORDERBOOK[i].contract); int ms = 1; if(potential != NULL && potential->symbol[0] != 0x0){ ms = cgc_match_symbol(potential, order); } if(potential != NULL && ms == 0 && potential->price <= order->price && potential->qty > 0 && potential->qty >= order->qty){ if(low_oid == 0){ low_oid = ORDERBOOK[i].order_id; o = &(ORDERBOOK[i]); }else if(ORDERBOOK[i].order_id < low_oid ){ low_oid = ORDERBOOK[i].order_id; o = &(ORDERBOOK[i]); } } } return o; } OP_ERR cgc_add_to_order_book(option_order_t *o, uint32_t acct_id, OP_TYPE direction){ for(int i = 0; i < NUM_ORDERS; ++i){ if(ORDERBOOK[i].contract.symbol[0] == 0x0){ char * obsym = ORDERBOOK[i].contract.symbol; cgc_memcpy(o->symbol, obsym, SYM_SIZE); ORDERBOOK[i].contract.qty = o->qty; ORDERBOOK[i].contract.price = o->price; ORDERBOOK[i].acct_id = acct_id; if(direction != BUY && direction != SELL) cgc__terminate(99); ORDERBOOK[i].direction = direction; ORDERBOOK[i].order_id = ORDER_ID; ORDER_ID += 1; return SELL_ADD; } } return ORDERS_FULL; } OP_ERR cgc_check_account_balance_can_buy(option_order_t *o, uint32_t acct_id){ if(acct_id < NUM_ACCOUNTS){ if((o->qty * o->price) < cgc_ACCOUNTS[acct_id].balance){ return OK; } else{ return NO_FILL; } } return BAD_ACCT; } void cgc_generic_resp(packet_t *resp, OP_TYPE ot, uint32_t acct_id){ resp->rt = RESPONSE; resp->ot = ot; resp->acct_id = acct_id; } cgc_size_t cgc_gen_order_fill_msg(packet_t *resp, OP_TYPE ot, char * sym, uint32_t qty, uint32_t acct_id){ cgc_generic_resp(resp, ot, acct_id); if(ot != BUY && ot != SELL) cgc__terminate(99); orderfill_t *of = (orderfill_t *) &(resp->op_data); cgc_memcpy(sym, &(of->symbol), SYM_SIZE); of->qty = qty; return sizeof(orderfill_t)-sizeof(void *); } OP_ERR cgc_check_account_holding_in_qty_sell(option_order_t *sell_order, uint32_t acct_id){ account_record_t *ar = &(cgc_ACCOUNTS[acct_id]); for(int i = 0; i < MAX_HOLDINGS; i++){ option_holding_t *h = &(ar->holdings[i]); if(cgc_memcmp(h->symbol, sell_order->symbol, SYM_SIZE) == OK){ if(sell_order->qty <= h->qty){ return OK; } else { return NO_HOLD; } } } return NO_MATCH; } OP_ERR cgc_fill_buy_order(uint32_t acct_id, orderbook_order_t *matched_order, option_order_t *order){ if(matched_order == NULL){ return NO_MATCH; } return cgc_fill_order(acct_id, order, matched_order); } OP_ERR cgc_run_option_transaction(uint32_t acct_id, option_order_t *order, OP_TYPE ot){ if(ot == BUY){ OP_ERR e = cgc_check_account_balance_can_buy(order, acct_id); if(e != OK) return e; // we reject buy orders with no sell order orderbook_order_t * matched_order = cgc_find_sell_order(order); OP_ERR ofe = cgc_fill_buy_order(acct_id, matched_order, order); if(ofe != OK) return ofe; return OK; } else if(ot == SELL){ OP_ERR e = cgc_check_account_holding_in_qty_sell(order, acct_id); if(e != OK) return e; return cgc_add_to_order_book(order, acct_id, SELL); } return UNK_E; }
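The order-matching walk in cgc_find_sell_order above is essentially price-time priority: among resting book entries for the same symbol priced at or below the buy order's limit and holding enough quantity, the entry with the lowest order id wins. A compact Python restatement, using a hypothetical dict layout rather than the CGC structs:

def find_sell_order(orderbook, buy):
    # Return the oldest (lowest order id) resting order that can fill `buy`.
    best = None
    for entry in orderbook:
        contract = entry["contract"]
        if (contract["symbol"] == buy["symbol"]
                and contract["price"] <= buy["price"]
                and contract["qty"] > 0
                and contract["qty"] >= buy["qty"]):
            if best is None or entry["order_id"] < best["order_id"]:
                best = entry
    return best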
<reponame>qiyanz/neuropod {"node_name_mapping": {"y": "some_namespace/in_y:0", "x": "some_namespace/in_x:0", "out": "some_namespace/out:0"}, "init_op_names": {}}
package io.cattle.platform.servicediscovery.api.util; import io.cattle.platform.allocator.service.AllocationHelper; import io.cattle.platform.archaius.util.ArchaiusUtil; import io.cattle.platform.core.addon.InServiceUpgradeStrategy; import io.cattle.platform.core.addon.InstanceHealthCheck; import io.cattle.platform.core.constants.AgentConstants; import io.cattle.platform.core.constants.InstanceConstants; import io.cattle.platform.core.constants.ServiceConstants; import io.cattle.platform.core.model.Instance; import io.cattle.platform.core.model.Service; import io.cattle.platform.core.model.Stack; import io.cattle.platform.core.util.PortSpec; import io.cattle.platform.core.util.SystemLabels; import io.cattle.platform.docker.client.DockerImage; import io.cattle.platform.object.meta.ObjectMetaDataManager; import io.cattle.platform.object.util.DataAccessor; import io.cattle.platform.object.util.DataUtils; import io.cattle.platform.servicediscovery.api.resource.ServiceDiscoveryConfigItem; import io.cattle.platform.util.type.CollectionUtils; import io.github.ibuildthecloud.gdapi.validation.ValidationErrorCodes; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import com.netflix.config.DynamicStringProperty; public class ServiceDiscoveryUtil { public static final List<String> SERVICE_INSTANCE_NAME_DIVIDORS = Arrays.asList("-", "_"); private static final int LB_HEALTH_CHECK_PORT = 42; private static final DynamicStringProperty LB_DRAIN_IMAGE_VERSION = ArchaiusUtil.getString("loadbalancher.drain.image.version"); private static final DynamicStringProperty LB_IMAGE_UUID = ArchaiusUtil.getString("lb.instance.image.uuid"); public static String getInstanceName(Instance instance) { if (instance != null && instance.getRemoved() == null) { return instance.getUuid(); } else { return null; } } public static String getServiceSuffixFromInstanceName(String instanceName) { for (String divider : SERVICE_INSTANCE_NAME_DIVIDORS) { if (!instanceName.contains(divider)) { continue; } String serviceSuffix = instanceName.substring(instanceName.lastIndexOf(divider) + 1); if (!StringUtils.isEmpty(serviceSuffix) && serviceSuffix.matches("\\d+")) { return serviceSuffix; } } return ""; } @SuppressWarnings("unchecked") public static List<String> getServiceLaunchConfigNames(Service service) { Map<String, Object> originalData = new HashMap<>(); originalData.putAll(DataUtils.getFields(service)); List<String> launchConfigNames = new ArrayList<>(); // put the primary config in launchConfigNames.add(ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME); // put the secondary configs in Object secondaryLaunchConfigs = originalData .get(ServiceConstants.FIELD_SECONDARY_LAUNCH_CONFIGS); if (secondaryLaunchConfigs != null) { for (Map<String, Object> secondaryLaunchConfig : (List<Map<String, Object>>) secondaryLaunchConfigs) { launchConfigNames.add(String.valueOf(secondaryLaunchConfig.get("name"))); } } return launchConfigNames; } public static Map<String, Object> getLaunchConfigWithServiceDataAsMap(Service service, String launchConfigName) { Map<String, Object> data = new HashMap<>(); // 1) get service data data.putAll(DataUtils.getFields(service)); // 2) remove launchConfig/secondaryConfig data Object launchConfig = data .get(ServiceConstants.FIELD_LAUNCH_CONFIG); if (launchConfig != null) { 
data.remove(ServiceConstants.FIELD_LAUNCH_CONFIG); } Object secondaryLaunchConfigs = data .get(ServiceConstants.FIELD_SECONDARY_LAUNCH_CONFIGS); if (secondaryLaunchConfigs != null) { data.remove(ServiceConstants.FIELD_SECONDARY_LAUNCH_CONFIGS); } // 3) populate launch config data data.putAll(getLaunchConfigDataAsMap(service, launchConfigName)); return data; } @SuppressWarnings("unchecked") public static Map<String, Map<Object, Object>> getServiceLaunchConfigsWithNames(Service service) { Map<String, Object> originalData = new HashMap<>(); originalData.putAll(DataUtils.getFields(service)); Map<String, Map<Object, Object>> launchConfigsWithNames = new HashMap<>(); // put the primary config in launchConfigsWithNames.put(ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME, CollectionUtils.toMap(originalData .get(ServiceConstants.FIELD_LAUNCH_CONFIG))); // put the secondary configs in Object secondaryLaunchConfigs = originalData .get(ServiceConstants.FIELD_SECONDARY_LAUNCH_CONFIGS); if (secondaryLaunchConfigs != null) { for (Map<String, Object> secondaryLaunchConfig : (List<Map<String, Object>>) secondaryLaunchConfigs) { launchConfigsWithNames.put(String.valueOf(secondaryLaunchConfig.get("name")), CollectionUtils.toMap(secondaryLaunchConfig)); } } return launchConfigsWithNames; } @SuppressWarnings("unchecked") public static Map<String, String> getMergedServiceLabels(Service service, AllocationHelper allocationHelper) { List<String> launchConfigNames = getServiceLaunchConfigNames(service); Map<String, String> labelsStr = new HashMap<>(); for (String currentLaunchConfigName : launchConfigNames) { Map<String, Object> data = getLaunchConfigDataAsMap(service, currentLaunchConfigName); Object l = data.get(ServiceDiscoveryConfigItem.LABELS.getCattleName()); if (l != null) { Map<String, String> labels = (HashMap<String, String>) l; allocationHelper.mergeLabels(labels, labelsStr); } } return labelsStr; } @SuppressWarnings("unchecked") public static Map<String, String> getLaunchConfigLabels(Service service, String launchConfigName) { if (launchConfigName == null) { launchConfigName = ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME; } Map<String, Object> data = getLaunchConfigDataAsMap(service, launchConfigName); Object labels = data.get(InstanceConstants.FIELD_LABELS); if (labels == null) { return new HashMap<String, String>(); } return (Map<String, String>) labels; } @SuppressWarnings("unchecked") public static Map<String, Object> getLaunchConfigDataAsMap(Service service, String launchConfigName) { if (launchConfigName == null) { launchConfigName = ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME; } Map<String, Object> launchConfigData = new HashMap<>(); if (launchConfigName.equalsIgnoreCase(ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME)) { launchConfigData = DataAccessor.fields(service) .withKey(ServiceConstants.FIELD_LAUNCH_CONFIG).withDefault(Collections.EMPTY_MAP) .as(Map.class); // if the value is empty, do not export ArrayList<String> deletedKeys = new ArrayList<>(); for (String key: launchConfigData.keySet()) { if (launchConfigData.get(key) == null) { deletedKeys.add(key); } } for (String key: deletedKeys) { launchConfigData.remove(key); } } else { List<Map<String, Object>> secondaryLaunchConfigs = DataAccessor.fields(service) .withKey(ServiceConstants.FIELD_SECONDARY_LAUNCH_CONFIGS) .withDefault(Collections.EMPTY_LIST).as( List.class); for (Map<String, Object> secondaryLaunchConfig : secondaryLaunchConfigs) { if (secondaryLaunchConfig.get("name").toString().equalsIgnoreCase(launchConfigName)) { launchConfigData = 
secondaryLaunchConfig; break; } } } Map<String, Object> data = new HashMap<>(); data.putAll(launchConfigData); Object labels = data.get(ServiceDiscoveryConfigItem.LABELS.getCattleName()); if (labels != null) { Map<String, String> labelsMap = new HashMap<String, String>(); labelsMap.putAll((Map<String, String>) labels); // overwrite with a copy of the map data.put(ServiceDiscoveryConfigItem.LABELS.getCattleName(), labelsMap); } return data; } public static Object getLaunchConfigObject(Service service, String launchConfigName, String objectName) { Map<String, Object> serviceData = ServiceDiscoveryUtil.getLaunchConfigDataAsMap(service, launchConfigName); return serviceData.get(objectName); } public static String generateServiceInstanceName(Stack env, Service service, String launchConfigName, int finalOrder) { String configName = launchConfigName == null || launchConfigName.equals(ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME) ? "" : launchConfigName + "-"; String name = String.format("%s-%s-%s%d", env.getName(), service.getName(), configName, finalOrder); return name; } public static boolean isServiceGeneratedName(Stack env, Service service, String instanceName) { for (String divider : SERVICE_INSTANCE_NAME_DIVIDORS) { if (instanceName.startsWith(String.format("%s%s%s", env.getName(), divider, service.getName()))) { return true; } } return false; } public static String getGeneratedServiceIndex(Stack env, Service service, String launchConfigName, String instanceName) { if (!isServiceGeneratedName(env, service, instanceName)) { return null; } Integer charAt = instanceName.length()-1; for (int i = instanceName.length() - 1; i > 0; i--) { if (instanceName.charAt(i) == '-' || instanceName.charAt(i) == '_') { break; } charAt = i; } return instanceName.substring(charAt, instanceName.length()); } @SuppressWarnings("unchecked") public static Map<String, Object> buildServiceInstanceLaunchData(Service service, Map<String, Object> deployParams, String launchConfigName, AllocationHelper allocationHelper) { Map<String, Object> serviceData = getLaunchConfigDataAsMap(service, launchConfigName); Map<String, Object> launchConfigItems = new HashMap<>(); // 1. put all parameters retrieved through deployParams if (deployParams != null) { launchConfigItems.putAll(deployParams); } // 2. Get parameters defined on the service level (merge them with the ones defined in for (String key : serviceData.keySet()) { Object dataObj = serviceData.get(key); if (launchConfigItems.get(key) != null) { if (dataObj instanceof Map) { // unfortunately, need to make an except for labels due to the merging aspect of the values if (key.equalsIgnoreCase(InstanceConstants.FIELD_LABELS)) { allocationHelper.normalizeLabels( service.getStackId(), (Map<String, String>) launchConfigItems.get(key), (Map<String, String>) dataObj); allocationHelper.mergeLabels((Map<String, String>) launchConfigItems.get(key), (Map<String, String>) dataObj); } else { ((Map<Object, Object>) dataObj).putAll((Map<Object, Object>) launchConfigItems.get(key)); } } else if (dataObj instanceof List) { for (Object existing : (List<Object>) launchConfigItems.get(key)) { if (!((List<Object>) dataObj).contains(existing)) { ((List<Object>) dataObj).add(existing); } } } } if (dataObj != null) { launchConfigItems.put(key, dataObj); } } // 3. 
add extra parameters launchConfigItems.put("accountId", service.getAccountId()); if (!launchConfigItems.containsKey(ObjectMetaDataManager.KIND_FIELD)) { launchConfigItems.put(ObjectMetaDataManager.KIND_FIELD, InstanceConstants.KIND_CONTAINER); } return launchConfigItems; } public static boolean isNoopService(Service service) { Object imageUUID = ServiceDiscoveryUtil.getLaunchConfigDataAsMap(service, ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME).get( InstanceConstants.FIELD_IMAGE_UUID); return (service.getSelectorContainer() != null && (imageUUID == null || imageUUID.toString().toLowerCase() .contains(ServiceConstants.IMAGE_NONE))) || isNoopLBService(service); } public static boolean isNoopLBService(Service service) { Object imageUUID = ServiceDiscoveryUtil.getLaunchConfigDataAsMap(service, ServiceConstants.PRIMARY_LAUNCH_CONFIG_NAME).get( InstanceConstants.FIELD_IMAGE_UUID); return service.getKind().equalsIgnoreCase(ServiceConstants.KIND_LOAD_BALANCER_SERVICE) && imageUUID != null && imageUUID.toString().toLowerCase() .contains(ServiceConstants.IMAGE_NONE); } public static void upgradeServiceConfigs(Service service, InServiceUpgradeStrategy strategy, boolean rollback) { updatePrimaryLaunchConfig(strategy, service, rollback); updateSecondaryLaunchConfigs(strategy, service, rollback); } @SuppressWarnings("unchecked") protected static void updateSecondaryLaunchConfigs(InServiceUpgradeStrategy strategy, Service service, boolean rollback) { Object newLaunchConfigs = null; if (rollback) { newLaunchConfigs = strategy.getPreviousSecondaryLaunchConfigs(); } else { newLaunchConfigs = strategy.getSecondaryLaunchConfigs(); Map<String, Map<String, Object>> newLaunchConfigNames = new HashMap<>(); if (newLaunchConfigs != null) { for (Map<String, Object> newLaunchConfig : (List<Map<String, Object>>) newLaunchConfigs) { newLaunchConfigNames.put(newLaunchConfig.get("name").toString(), newLaunchConfig); } Object oldLaunchConfigs = strategy.getPreviousSecondaryLaunchConfigs(); for (Map<String, Object> oldLaunchConfig : (List<Map<String, Object>>)oldLaunchConfigs) { Map<String, Object> newLaunchConfig = newLaunchConfigNames .get(oldLaunchConfig.get("name")); if (newLaunchConfig != null) { preserveOldRandomPorts(service, newLaunchConfig, oldLaunchConfig); } } } } DataAccessor.fields(service).withKey(ServiceConstants.FIELD_SECONDARY_LAUNCH_CONFIGS) .set(newLaunchConfigs); } @SuppressWarnings("unchecked") protected static void updatePrimaryLaunchConfig(InServiceUpgradeStrategy strategy, Service service, boolean rollback) { Map<String, Object> newLaunchConfig = null; if (rollback) { newLaunchConfig = (Map<String, Object>) strategy.getPreviousLaunchConfig(); } else { newLaunchConfig = (Map<String, Object>) strategy.getLaunchConfig(); Map<String, Object> oldLaunchConfig = (Map<String, Object>) strategy.getPreviousLaunchConfig(); preserveOldRandomPorts(service, newLaunchConfig, oldLaunchConfig); } DataAccessor.fields(service).withKey(ServiceConstants.FIELD_LAUNCH_CONFIG) .set(newLaunchConfig); } protected static void preserveOldRandomPorts(Service service, Map<String, Object> newLaunchConfig, Map<String, Object> oldLaunchConfig) { Map<Integer, PortSpec> oldPortMap = getServicePortsMap(service, oldLaunchConfig); Map<Integer, PortSpec> newPortMap = getServicePortsMap(service, newLaunchConfig); boolean changedNewPorts = false; for(Integer privatePort : newPortMap.keySet()) { if(newPortMap.get(privatePort).getPublicPort() == null) { if (oldPortMap.containsKey(privatePort)) { 
newPortMap.get(privatePort).setPublicPort(oldPortMap.get(privatePort).getPublicPort()); changedNewPorts = true; } } } if(changedNewPorts) { List<String> newPorts = new ArrayList<>(); for (Map.Entry<Integer, PortSpec> entry : newPortMap.entrySet()) { newPorts.add(entry.getValue().toSpec()); } if (!newPorts.isEmpty()) { newLaunchConfig.put(InstanceConstants.FIELD_PORTS, newPorts); } } } @SuppressWarnings("unchecked") protected static Map<Integer, PortSpec> getServicePortsMap(Service service, Map<String, Object> launchConfigData) { if (launchConfigData.get(InstanceConstants.FIELD_PORTS) == null) { return new LinkedHashMap<Integer, PortSpec>(); } List<String> specs = (List<String>) launchConfigData.get(InstanceConstants.FIELD_PORTS); Map<Integer, PortSpec> portMap = new LinkedHashMap<Integer, PortSpec>(); for (String spec : specs) { PortSpec portSpec = new PortSpec(spec); portMap.put(new Integer(portSpec.getPrivatePort()), portSpec); } return portMap; } @SuppressWarnings("unchecked") public static void injectBalancerLabelsAndHealthcheck(Map<Object, Object> launchConfig) { Map<String, String> labels = new HashMap<>(); // set labels Object labelsObj = launchConfig.get(InstanceConstants.FIELD_LABELS); if (labelsObj != null) { labels = (Map<String, String>) labelsObj; } if (!labels.containsKey(SystemLabels.LABEL_AGENT_ROLE)) { labels.put(SystemLabels.LABEL_AGENT_ROLE, AgentConstants.ENVIRONMENT_ADMIN_ROLE); labels.put(SystemLabels.LABEL_AGENT_CREATE, "true"); } //check if the LB service is a drainProvider from lb image if(doesLBHaveDrainSupport(launchConfig)){ labels.put(SystemLabels.LABEL_AGENT_SERVICE_DRAIN_PROVIDER, "true"); labels.put(SystemLabels.LABEL_AGENT_ROLE, AgentConstants.ENVIRONMENT_ADMIN_ROLE + ",agent"); labels.put(SystemLabels.LABEL_AGENT_CREATE, "true"); } launchConfig.put(InstanceConstants.FIELD_LABELS, labels); // set health check if (launchConfig.get(InstanceConstants.FIELD_HEALTH_CHECK) == null) { Integer healthCheckPort = LB_HEALTH_CHECK_PORT; InstanceHealthCheck healthCheck = new InstanceHealthCheck(); healthCheck.setPort(healthCheckPort); healthCheck.setInterval(2000); healthCheck.setHealthyThreshold(2); healthCheck.setUnhealthyThreshold(3); healthCheck.setResponseTimeout(2000); healthCheck.setInitializingTimeout(60000); healthCheck.setReinitializingTimeout(60000); launchConfig.put(InstanceConstants.FIELD_HEALTH_CHECK, healthCheck); } } private static boolean doesLBHaveDrainSupport(Map<Object, Object> launchConfig) { if(launchConfig.get(InstanceConstants.FIELD_IMAGE_UUID) == null) { return false; } String imageUuid = (String)launchConfig.get(InstanceConstants.FIELD_IMAGE_UUID); Pair<String, String> instanceImage = getImageAndVersion(imageUuid.toLowerCase()); if (instanceImage.getLeft().isEmpty() || instanceImage.getRight().isEmpty()) { return false; } Pair<String, String> defaultImage = getImageAndVersion(LB_IMAGE_UUID.get().toLowerCase()); if (!defaultImage.getLeft().equals(instanceImage.getLeft())) { return false; } return isDrainProvider(instanceImage.getRight()); } private static Pair<String, String> getImageAndVersion(String imageUUID) { DockerImage dockerImage = DockerImage.parse(imageUUID); String[] splitted = dockerImage.getFullName().split(":"); if (splitted.length <= 1) { return Pair.of("", ""); } String repoAndImage = splitted[0]; String imageVersion = splitted[1]; return Pair.of(repoAndImage, imageVersion); } private static boolean isDrainProvider(String actualVersion) { String requiredVersion = LB_DRAIN_IMAGE_VERSION.get(); if 
(StringUtils.isEmpty(requiredVersion)) { return false; } String[] requiredParts = requiredVersion.split("\\."); if (requiredParts.length < 3) { // Required image is not following semantic versioning. return false; } int requiredMajor, requiredMinor, requiredPatch = 0; try { String majorTemp = requiredParts[0].startsWith("v") ? requiredParts[0].substring(1, requiredParts[0].length()) : requiredParts[0]; requiredMajor = Integer.valueOf(majorTemp); requiredMinor = Integer.valueOf(requiredParts[1]); requiredPatch = Integer.valueOf(requiredParts[2]); } catch (NumberFormatException e) { // Require image is not following semantic versioning. return false; } String[] actualParts = actualVersion.split("\\."); if (actualParts.length < 3) { // Image is not following semantic versioning. return false; } int actualMajor, actualMinor, actualPatch = 0; try { String majorTemp = actualParts[0].startsWith("v") ? actualParts[0].substring(1, actualParts[0].length()) : actualParts[0]; actualMajor = Integer.valueOf(majorTemp).intValue(); actualMinor = Integer.valueOf(actualParts[1]).intValue(); String[] patchParts = actualParts[2].split("\\-"); actualPatch = Integer.valueOf(patchParts[0]); } catch (NumberFormatException e) { // Image is not following semantic versioning. return false; } if (actualMajor > requiredMajor) { return true; } else if (actualMajor == requiredMajor && actualMinor > requiredMinor) { return true; } else if (actualMinor == requiredMinor && actualPatch >= requiredPatch) { return true; } return false; } @SuppressWarnings("unchecked") public static void validateScaleSwitch(Object newLaunchConfig, Object currentLaunchConfig) { if (isGlobalService((Map<Object, Object>) currentLaunchConfig) != isGlobalService((Map<Object, Object>) newLaunchConfig)) { ValidationErrorCodes.throwValidationError(ValidationErrorCodes.INVALID_OPTION, "Switching from global scale to fixed (and vice versa)"); } } @SuppressWarnings("unchecked") protected static boolean isGlobalService(Map<Object, Object> launchConfig) { // set labels Object labelsObj = launchConfig.get(InstanceConstants.FIELD_LABELS); if (labelsObj == null) { return false; } Map<String, String> labels = (Map<String, String>) labelsObj; String globalService = labels.get(ServiceConstants.LABEL_SERVICE_GLOBAL); return Boolean.valueOf(globalService); } }
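isDrainProvider above gates the drain feature on a semantic-version comparison between the instance's image tag and a required minimum version. A simplified Python sketch of that comparison; it treats the three components as a plain tuple, strips a leading "v" and any "-suffix", and is slightly stricter than the Java code, so it is an illustration of the intent rather than an exact port:

def parse_semver(tag):
    # "v1.2.3-rc1" -> (1, 2, 3); returns None if the tag is not semver-like.
    parts = tag.lstrip("v").split(".")
    if len(parts) < 3:
        return None
    try:
        return int(parts[0]), int(parts[1]), int(parts[2].split("-")[0])
    except ValueError:
        return None

def is_drain_provider(actual_tag, required_tag):
    actual, required = parse_semver(actual_tag), parse_semver(required_tag)
    if actual is None or required is None:
        return False
    return actual >= required    # lexicographic tuple comparison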
package com.twitter.finagle;

import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;

import org.junit.Assert;
import org.junit.Test;

public class AddressCompilationTest {

  @Test
  public void testInet() {
    InetSocketAddress ia = new InetSocketAddress(0);
    Address a = Addresses.newInetAddress(ia);
    Assert.assertNotNull(a);

    Map<String, Object> meta = new HashMap<>();
    meta.put("foo", "bar");
    Address b = Addresses.newInetAddress(ia, meta);
    Assert.assertNotNull(b);
  }

  @Test
  public void testFailed() {
    Address a = Addresses.newFailedAddress(new Exception("boo"));
    Assert.assertNotNull(a);
  }
}
<filename>warehouse/query-core/src/main/java/datawave/core/iterators/ColumnQualifierRangeIterator.java package datawave.core.iterators; import java.io.IOException; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.iterators.IteratorEnvironment; import org.apache.accumulo.core.iterators.SortedKeyValueIterator; import org.apache.hadoop.io.Text; /** * * This class enables range queries based on Column Qualifiers * */ public class ColumnQualifierRangeIterator extends ColumnRangeIterator { public ColumnQualifierRangeIterator() { super(); } public ColumnQualifierRangeIterator(SortedKeyValueIterator<Key,Value> source) { super(source); } public ColumnQualifierRangeIterator(SortedKeyValueIterator<Key,Value> source, Range columnFamilyRange) { super(source, columnFamilyRange); } @Override protected void consumeImpl() throws IOException { int count = 0; int limit = getSkipLimit(); while (getSource().hasTop()) { Key topColumnQual = new Key(getSource().getTopKey().getColumnQualifier()); if (getColumnRange().beforeStartKey(topColumnQual)) { // top key's CQ is before the desired range starts, need to skip some CQs... if (count < limit) { advanceSource(); ++count; } else { Text row = getSource().getTopKey().getRow(); Text cf = getSource().getTopKey().getColumnFamily(); Text startColQual = getColumnStart(); Key nextKey = new Key(row, cf, startColQual); reseek(nextKey); count = 0; } } else if (getColumnRange().afterEndKey(topColumnQual)) { // reached the end of the desired CQ range, need to go to the next CF or row if (count < limit) { advanceSource(); ++count; } else { Text row = getSource().getTopKey().getRow(); Text nextCF = new Text(followingArray(getSource().getTopKey().getColumnFamily().getBytes())); Text startColQual = getColumnStart(); Key nextKey = new Key(row, nextCF, startColQual); reseek(nextKey); count = 0; } } else { // within the range, break the consuming loop... break; } } // end while() } // end consume() @Override public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) { return new ColumnQualifierRangeIterator(getSource().deepCopy(env), getColumnRange()); } }
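The consumeImpl loop above follows a common Accumulo iterator pattern: advance the source a bounded number of times while the top key's column qualifier is outside the desired range, and fall back to an explicit reseek once the skip limit is exhausted. A language-agnostic sketch of that control flow (Python, with the Accumulo calls stubbed out as callables; names are illustrative):

def consume(source, before_range, after_range, seek_to_start, seek_past_family, skip_limit):
    skipped = 0
    while source.has_top():
        cq = source.top_column_qualifier()
        if before_range(cq) or after_range(cq):
            if skipped < skip_limit:
                source.advance()            # cheap: step one entry forward
                skipped += 1
            else:
                # expensive but bounded: jump straight to the relevant key
                (seek_to_start if before_range(cq) else seek_past_family)()
                skipped = 0
        else:
            break                           # top key is inside the range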
<filename>src/main/java/org/broadinstitute/hellbender/cmdline/PicardCommandLineProgram.java package org.broadinstitute.hellbender.cmdline; import htsjdk.samtools.*; import htsjdk.samtools.util.BlockCompressedOutputStream; import htsjdk.samtools.util.BlockCompressedStreamConstants; import org.broadinstitute.barclay.argparser.Argument; import org.broadinstitute.hellbender.utils.read.ReadUtils; import java.io.File; /** * Base class for all Picard tools. Includes standard options for dealing with common sequence data formats. */ public abstract class PicardCommandLineProgram extends CommandLineProgram { @Argument(doc = "Validation stringency for all SAM files read by this program. Setting stringency to SILENT " + "can improve performance when processing a BAM file in which variable-length data (read, qualities, tags) " + "do not otherwise need to be decoded.", common=true) public ValidationStringency VALIDATION_STRINGENCY = ValidationStringency.DEFAULT_STRINGENCY; @Argument(doc = "Compression level for all compressed files created (e.g. BAM and GELI).", common=true) public int COMPRESSION_LEVEL = BlockCompressedStreamConstants.DEFAULT_COMPRESSION_LEVEL; @Argument(doc = "When writing SAM files that need to be sorted, this will specify the number of records stored in RAM before spilling to disk. Increasing this number reduces the number of file handles needed to sort a SAM file, and increases the amount of RAM needed.", optional=true, common=true) public Integer MAX_RECORDS_IN_RAM = SAMFileWriterImpl.getDefaultMaxRecordsInRam(); @Argument(doc = "Whether to create a BAM index when writing a coordinate-sorted BAM file.", common=true) public Boolean CREATE_INDEX = Defaults.CREATE_INDEX; @Argument(doc="Whether to create an MD5 digest for any BAM or FASTQ files created. ", common=true) public boolean CREATE_MD5_FILE = Defaults.CREATE_MD5; @Argument(fullName = StandardArgumentDefinitions.REFERENCE_LONG_NAME, shortName = StandardArgumentDefinitions.REFERENCE_SHORT_NAME, doc = "Reference sequence file.", common = true, optional = true) public File REFERENCE_SEQUENCE = Defaults.REFERENCE_FASTA; @Override public Object instanceMain(final String[] argv) { // First, we parse the commandline arguments, then we set important statics like VALIDATION_STRINGENCY, and // finally, we call into the normal instance main (post arg-parsing). If don't start with the argument parsing // we always get default values for VALIDATION_STRINGENCY, COMPRESSION_LEVEL, etc. if (!parseArgs(argv)) { //an information only argument like help or version was specified, just exit return 0; } // set general SAM/BAM parameters SamReaderFactory.setDefaultValidationStringency(VALIDATION_STRINGENCY); BlockCompressedOutputStream.setDefaultCompressionLevel(COMPRESSION_LEVEL); if (MAX_RECORDS_IN_RAM != null) { SAMFileWriterImpl.setDefaultMaxRecordsInRam(MAX_RECORDS_IN_RAM); } if (CREATE_INDEX){ SAMFileWriterFactory.setDefaultCreateIndexWhileWriting(true); } SAMFileWriterFactory.setDefaultCreateMd5File(CREATE_MD5_FILE); // defer to parent to finish the initialization and starting the program. return instanceMainPostParseArgs(); } /** * Create a common SAMFileWriter for use with Picard tools. * * @param outputFile - if this file has a .cram extension then a reference is required. Can not be null. * @param referenceFile - the reference source to use. Can not be null if a output file has a .cram extension. 
* @param header - header to be used for the output writer * @param preSorted - if true then the records must already be sorted to match the header sort order * @return SAMFileWriter */ public SAMFileWriter createSAMWriter( final File outputFile, final File referenceFile, final SAMFileHeader header, final boolean preSorted) { BlockCompressedOutputStream.setDefaultCompressionLevel(COMPRESSION_LEVEL); SAMFileWriterFactory factory = new SAMFileWriterFactory() .setCreateIndex(CREATE_INDEX) .setCreateMd5File(CREATE_MD5_FILE); if (MAX_RECORDS_IN_RAM != null) { factory = factory.setMaxRecordsInRam(MAX_RECORDS_IN_RAM); } return ReadUtils.createCommonSAMWriterFromFactory(factory, outputFile, referenceFile, header, preSorted); } }
package com.reactnativenavigation.utils;

import android.os.Build;
import android.view.View;

import java.util.concurrent.atomic.AtomicInteger;

public class CompatUtils {
    private static final AtomicInteger viewId = new AtomicInteger(1);

    public static int generateViewId() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
            return View.generateViewId();
        } else {
            while (true) {
                final int result = viewId.get();
                // aapt-generated IDs have the high byte nonzero; clamp to the range under that.
                int newValue = result + 1;
                if (newValue > 0x00FFFFFF) newValue = 1; // Roll over to 1, not 0.
                if (viewId.compareAndSet(result, newValue)) {
                    return result;
                }
            }
        }
    }
}
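A small Python stand-in for the pre-KitKat fallback above, showing the same compare-and-set idea: hand out ids starting at 1 and roll over before entering the range aapt reserves for generated resource ids (high byte nonzero). The class name and the lock-based emulation of AtomicInteger are illustrative.

import threading

class ViewIdGenerator:
    _MAX = 0x00FFFFFF  # stay below aapt-generated resource ids

    def __init__(self):
        self._value = 1
        self._lock = threading.Lock()

    def generate(self):
        with self._lock:
            result = self._value
            self._value = 1 if result + 1 > self._MAX else result + 1
            return result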
{ "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "Se presenta una aplicaci\u00f3n para el almacenamiento, an\u00e1lisis, y visualizaci\u00f3n interactiva de datos heterog\u00e9neos pertenecientes al biobanco de la Patagonia (proyecto RAICES), basado en herramientas libres como Django Rest Framework, Vue.js, Bokeh, THREE.js, y contenedores de Docker.\n\nEl Instituto Patag\u00f3nico de Ciencias Sociales y Humanas de Ciencias\nSociales y Humanas (`IPCSH-CONICET`_) estudia c\u00f3mo se relacionan las\ncostumbres y herencias de la regi\u00f3n con variables f\u00edsicas de inter\u00e9s\nm\u00e9dico, en particular con rasgos de riesgo para enfermedades complejas\ncomo obesidad, sobrepeso y s\u00edndrome metab\u00f3lico, entre otras. El proyecto\n`RAICES`_ (iniciativa propia del IPCSH-CONICET) busca generar un banco\nde datos gen\u00e9ticos o biobanco de la Patagonia que pueda servir para\nfuturas aplicaciones en el dise\u00f1o de pol\u00edticas p\u00fablicas de salud. Este\ntrabajo, realizado a trav\u00e9s de una pasant\u00eda promovida por el Laboratorio\nde Ciencias de las Im\u00e1genes (LCI) de la Universidad Nacional del Sur,\npresenta la implementaci\u00f3n de un datawarehouse (basado en Python y\nMongoDB) que permite a los investigadores del proyecto almacenar,\nconsultar y analizar datos heterog\u00e9neos propios de la colecta (encuestas\nsocioecon\u00f3micas, informaci\u00f3n geogr\u00e1fica y gen\u00e9tica, h\u00e1bitos\nalimenticios, modelos 3D, v\u00eddeos de cuerpo completo, medidas\nantropom\u00e9tricas, entre otros). Dado que las fuentes de datos eran\ndiversas, se trabaj\u00f3 en un m\u00f3dulo de *ETL* (*Extract, Transform and\nLoad*) para normalizar los datos y adaptarlos al modelo del\ndatawarehouse. Junto con el datawarehouse se dise\u00f1\u00f3 y desarroll\u00f3 una\n`aplicaci\u00f3n web`_ para la carga masiva de datos y acceso a los mismos en\nforma selectiva, brindando tambi\u00e9n una plataforma interactiva de\nvisualizaci\u00f3n de datos exploratoria (gr\u00e1ficos y modelos 3D). Se tiene\ncomo resultado un framework f\u00e1cilmente generalizable basado en\ntecnolog\u00edas libres disponible para ser utilizado en otros \u00e1mbitos\ncient\u00edficos Las tecnolog\u00edas utilizadas para el desarrollo del trabajo\nfueron las siguientes: Django Rest Framework, Vue.js, Bokeh, THREE.js,\nPandas, pyocr, pillow, Mongoengine, Docker Este trabajo fue realizado a\ntrav\u00e9s de una pasant\u00eda promovida por el Laboratorio de Ciencias de las\nIm\u00e1genes (LCI) de la Universidad Nacional del Sur.\n\n.. _IPCSH-CONICET: http://www.ipcsh.conicet.gob.ar/\n.. _RAICES: https://www.facebook.com/proyecto.raices.patagonia/\n.. _aplicaci\u00f3n web: https://youtu.be/CIpbHRLAG0I\n", "language": "spa", "recorded": "2018-11-24", "speakers": [ "<NAME>", "<NAME>" ], "thumbnail_url": "https://i.ytimg.com/vi/RD3m0Z6OpIA/hqdefault.jpg", "title": "Datawarehousing para datos gen\u00e9ticos, socioecon\u00f3micos y fenot\u00edpicos, con visualizaci\u00f3n 3D", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=RD3m0Z6OpIA" } ] }
<reponame>yjt98765/qsharp-compiler<filename>examples/QIR/JITCompilation/qir-jit.py<gh_stars>100-1000
import sys, platform
import llvmlite.binding as llvm
from ctypes import CFUNCTYPE

linux_runtime_libs = ["build/libMicrosoft.Quantum.Qir.Runtime.so",
                      "build/libMicrosoft.Quantum.Qir.QSharp.Core.so",
                      "build/libMicrosoft.Quantum.Qir.QSharp.Foundation.so",
                      "build/Microsoft.Quantum.Simulator.Runtime.dll",
                      "build/libQIRinit.so"]

windows_runtime_libs = ["build/Microsoft.Quantum.Qir.Runtime.dll",
                        "build/Microsoft.Quantum.Qir.QSharp.Core.dll",
                        "build/Microsoft.Quantum.Qir.QSharp.Foundation.dll",
                        "build/Microsoft.Quantum.Simulator.Runtime.dll",
                        "build/QIRinit.dll"]

if platform.system() == "Linux":
    runtime_libs = linux_runtime_libs
elif platform.system() == "Windows":
    runtime_libs = windows_runtime_libs
else:
    raise Exception("unsupported platform")

def main(qir_file, entry_point):
    # Initialize LLVM
    llvm.initialize()
    llvm.initialize_native_target()
    llvm.initialize_native_asmprinter()

    # Load the QIR Runtime libraries
    for lib in runtime_libs:
        llvm.load_library_permanently(lib)

    # Parse the provided QIR module
    file = open(qir_file, 'r')
    module = llvm.parse_assembly(file.read())

    # Create a jit execution engine
    target = llvm.Target.from_default_triple().create_target_machine()
    jit_engine = llvm.create_mcjit_compiler(module, target)

    # Initialize the QIR Runtime and simulator via exposed C wrapper
    fun_ptr = llvm.address_of_symbol("InitQIRSim")
    CFUNCTYPE(None)(fun_ptr)()

    # Run the entry point of the QIR module
    fun_ptr = jit_engine.get_function_address(entry_point)
    CFUNCTYPE(None)(fun_ptr)()

if __name__ == "__main__":
    assert len(sys.argv) == 3, "need to supply qir file and entry point arguments"
    main(sys.argv[1], sys.argv[2])
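A typical invocation passes a QIR module and the mangled entry-point symbol generated for the Q# program; both names below are hypothetical placeholders, not values taken from this repository: python qir-jit.py qir/Hello.ll Hello__HelloQ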
# -*- coding: utf-8 -*-

from benedict.serializers import JSONSerializer

import unittest


class json_serializer_test_case(unittest.TestCase):

    def test_decode_json(self):
        # TODO
        pass

    def test_encode_json(self):
        # TODO
        pass
def main():
    while True:
        try:
            string = raw_input('What numbers should I average? ')
            words = string.split()
            numbers = map(float, words)
            average = sum(numbers) / len(numbers)
            print 'The average is', average
            raw_input('Press enter to quit.\n')
            return
        except:
            print 'ERROR: I can only take numbers!'

if __name__ == '__main__':
    main()
/****************************************************************************** * Product: Adempiere ERP & CRM Smart Business Solution * * Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved. * * This program is free software, you can redistribute it and/or modify it * * under the terms version 2 of the GNU General Public License as published * * by the Free Software Foundation. This program is distributed in the hope * * that it will be useful, but WITHOUT ANY WARRANTY, without even the implied * * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * * with this program, if not, write to the Free Software Foundation, Inc., * * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * For the text or an alternative of this public license, you may reach us * * ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA * * or via <EMAIL> or http://www.compiere.org/license.html * *****************************************************************************/ package org.compiere.model; import java.math.BigDecimal; import java.sql.Timestamp; import org.compiere.util.KeyNamePair; /** Generated Interface for R_RequestUpdate * @author Adempiere (generated) * @version Release 3.5.4a */ public interface I_R_RequestUpdate { /** TableName=R_RequestUpdate */ public static final String Table_Name = "R_RequestUpdate"; /** AD_Table_ID=802 */ public static final int Table_ID = MTable.getTable_ID(Table_Name); KeyNamePair Model = new KeyNamePair(Table_ID, Table_Name); /** AccessLevel = 7 - System - Client - Org */ BigDecimal accessLevel = BigDecimal.valueOf(7); /** Load Meta Data */ /** Column name AD_Client_ID */ public static final String COLUMNNAME_AD_Client_ID = "AD_Client_ID"; /** Get Client. * Client/Tenant for this installation. */ public int getAD_Client_ID(); /** Column name AD_Org_ID */ public static final String COLUMNNAME_AD_Org_ID = "AD_Org_ID"; /** Set Organization. * Organizational entity within client */ public void setAD_Org_ID (int AD_Org_ID); /** Get Organization. * Organizational entity within client */ public int getAD_Org_ID(); /** Column name ConfidentialTypeEntry */ public static final String COLUMNNAME_ConfidentialTypeEntry = "ConfidentialTypeEntry"; /** Set Entry Confidentiality. * Confidentiality of the individual entry */ public void setConfidentialTypeEntry (String ConfidentialTypeEntry); /** Get Entry Confidentiality. * Confidentiality of the individual entry */ public String getConfidentialTypeEntry(); /** Column name Created */ public static final String COLUMNNAME_Created = "Created"; /** Get Created. * Date this record was created */ public Timestamp getCreated(); /** Column name CreatedBy */ public static final String COLUMNNAME_CreatedBy = "CreatedBy"; /** Get Created By. * User who created this records */ public int getCreatedBy(); /** Column name EndTime */ public static final String COLUMNNAME_EndTime = "EndTime"; /** Set End Time. * End of the time span */ public void setEndTime (Timestamp EndTime); /** Get End Time. * End of the time span */ public Timestamp getEndTime(); /** Column name IsActive */ public static final String COLUMNNAME_IsActive = "IsActive"; /** Set Active. * The record is active in the system */ public void setIsActive (boolean IsActive); /** Get Active. 
* The record is active in the system */ public boolean isActive(); /** Column name M_ProductSpent_ID */ public static final String COLUMNNAME_M_ProductSpent_ID = "M_ProductSpent_ID"; /** Set Product Used. * Product/Resource/Service used in Request */ public void setM_ProductSpent_ID (int M_ProductSpent_ID); /** Get Product Used. * Product/Resource/Service used in Request */ public int getM_ProductSpent_ID(); public I_M_Product getM_ProductSpent() throws RuntimeException; /** Column name QtyInvoiced */ public static final String COLUMNNAME_QtyInvoiced = "QtyInvoiced"; /** Set Quantity Invoiced. * Invoiced Quantity */ public void setQtyInvoiced (BigDecimal QtyInvoiced); /** Get Quantity Invoiced. * Invoiced Quantity */ public BigDecimal getQtyInvoiced(); /** Column name QtySpent */ public static final String COLUMNNAME_QtySpent = "QtySpent"; /** Set Quantity Used. * Quantity used for this event */ public void setQtySpent (BigDecimal QtySpent); /** Get Quantity Used. * Quantity used for this event */ public BigDecimal getQtySpent(); /** Column name Result */ public static final String COLUMNNAME_Result = "Result"; /** Set Result. * Result of the action taken */ public void setResult (String Result); /** Get Result. * Result of the action taken */ public String getResult(); /** Column name R_Request_ID */ public static final String COLUMNNAME_R_Request_ID = "R_Request_ID"; /** Set Request. * Request from a Business Partner or Prospect */ public void setR_Request_ID (int R_Request_ID); /** Get Request. * Request from a Business Partner or Prospect */ public int getR_Request_ID(); public I_R_Request getR_Request() throws RuntimeException; /** Column name R_RequestUpdate_ID */ public static final String COLUMNNAME_R_RequestUpdate_ID = "R_RequestUpdate_ID"; /** Set Request Update. * Request Updates */ public void setR_RequestUpdate_ID (int R_RequestUpdate_ID); /** Get Request Update. * Request Updates */ public int getR_RequestUpdate_ID(); /** Column name StartTime */ public static final String COLUMNNAME_StartTime = "StartTime"; /** Set Start Time. * Time started */ public void setStartTime (Timestamp StartTime); /** Get Start Time. * Time started */ public Timestamp getStartTime(); /** Column name Updated */ public static final String COLUMNNAME_Updated = "Updated"; /** Get Updated. * Date this record was updated */ public Timestamp getUpdated(); /** Column name UpdatedBy */ public static final String COLUMNNAME_UpdatedBy = "UpdatedBy"; /** Get Updated By. * User who updated this records */ public int getUpdatedBy(); }
/* * Copyright 2009 Weaver authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.testing.instrumentation; /** * An object that transforms the byte code that defines a class. * * @author <EMAIL> (<NAME>) */ public interface Instrumenter { /** * Transforms an array of bytes that defines a class. * * @param className the name of the class being defined. * @param classData the bytes that make up the class data. * * @return the transformed bytecode. A given Instrumenter may chose not to * transform all classes, in which case it may just return the given * classData. * */ byte[] instrument(String className, byte[] classData); }
# This script shows the usage of binary message types
# It demonstrates the low level interaction (also suitable for uControlers) to do a https POST in order to send data from the device to the Cloud
# all configuration settings come from config.py
try:
    import config
except ImportError:
    print("Please copy template-config.py to config.py and configure appropriately !")
    exit()

import sys
import socket
import ssl
import base64

input = "test content"  # could be binary, e.g. an image or arbitrary file content
input_base64 = (base64.b64encode(input.encode('utf-8'))).decode('utf-8')

post_payload = '{"mode":"async", "messageType":"'
post_payload = post_payload + config.message_type_upstream
post_payload = post_payload + '", "messages":[{"' + config.fieldname_upstream + '":"'
post_payload = post_payload + input_base64
post_payload = post_payload + '"}]}'
# print(post_payload)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c = ssl.wrap_socket(s)
c.connect(socket.getaddrinfo(config.host, 443)[0][4])

request = 'POST /com.sap.iotservices.mms/v1/api/http/data/' + config.device_id + ' HTTP/1.1\r\n'
request = request + 'Host: ' + config.host + '\r\n'
request = request + 'Content-Type: application/json;charset=utf-8\r\n'
request = request + 'Authorization: Bearer ' + config.oauth_token + '\r\n'
request = request + 'Content-Length: ' + str(len(post_payload)) + '\r\n\r\n'
request = request + post_payload + '\r\n\r\n'
# print(request)

if (sys.version_info.major == 3):
    c.write(bytes(request, 'ascii'))
else:
    c.write(request)

result = c.read(4096)
print(result)
c.close()
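For comparison, the same upstream message can be posted with the requests library instead of a hand-built HTTP request over a raw TLS socket. This sketch is an alternative, not part of the original sample; it reuses the config fields above and mirrors the payload layout.

# Hedged alternative using requests (assumes the same config.py fields as above).
import base64
import requests
import config

payload = {
    "mode": "async",
    "messageType": config.message_type_upstream,
    "messages": [{config.fieldname_upstream:
                  base64.b64encode(b"test content").decode("utf-8")}],
}
resp = requests.post(
    "https://{}/com.sap.iotservices.mms/v1/api/http/data/{}".format(
        config.host, config.device_id),
    headers={"Authorization": "Bearer " + config.oauth_token},
    json=payload,  # sets Content-Type: application/json automatically
)
print(resp.status_code, resp.text)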
from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field, validator


class PolicyStoreTypes(Enum):
    OPA = "OPA"
    MOCK = "MOCK"


class PolicyStoreDetails(BaseModel):
    """
    represents a policy store endpoint - contains the policy store's:
    - location (url)
    - type
    - credentials
    """
    type: PolicyStoreTypes = Field(PolicyStoreTypes.OPA, description="the type of policy store, currently only OPA is officially supported")
    url: str = Field(..., description="the url that OPA can be found in. if localhost is the host - it means OPA is on the same hostname as OPAL client.")
    token: Optional[str] = Field(None, description="optional access token required by the policy store")

    @validator('type')
    def force_enum(cls, v):
        if isinstance(v, str):
            return PolicyStoreTypes(v)
        if isinstance(v, PolicyStoreTypes):
            return v
        raise ValueError(f'invalid value: {v}')

    class Config:
        use_enum_values = True
        allow_population_by_field_name = True
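Example usage with illustrative values: a local OPA url is accepted and the string "OPA" is coerced by the force_enum validator, while an unknown type string fails validation.

# Illustrative usage; the url is a placeholder, not a value from this project.
store = PolicyStoreDetails(url="http://localhost:8181", type="OPA")
print(store.url, store.token)

try:
    PolicyStoreDetails(url="http://localhost:8181", type="REDIS")
except ValueError as err:  # pydantic's ValidationError subclasses ValueError
    print("rejected:", err)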
<reponame>charles-l/pyinfra
from .client import SSHClient  # noqa: F401
package org.infinispan.stream;

import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.testng.annotations.Test;

/**
 * @author <NAME>
 */
@Test(groups = "functional", testName = "streams.SimpleStreamOffHeapTest")
public class SimpleStreamOffHeapTest extends SimpleStreamTest {

   @Override
   protected void enhanceConfiguration(ConfigurationBuilder builder) {
      super.enhanceConfiguration(builder);
      builder.memory().storageType(StorageType.OFF_HEAP);
   }
}
// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

#include <string_view>

#include <wpi/SmallString.h>
#include <wpi/StringExtras.h>
#include <wpi/raw_ostream.h>

#include "frc/ScopedTracer.h"
#include "frc/simulation/SimHooks.h"
#include "gtest/gtest.h"

TEST(ScopedTracerTest, Timing) {
  wpi::SmallString<128> buf;
  wpi::raw_svector_ostream os(buf);

  frc::sim::PauseTiming();
  {
    frc::ScopedTracer tracer("timing_test", os);
    frc::sim::StepTiming(1.5_s);
  }
  frc::sim::ResumeTiming();

  std::string_view out = os.str();
  EXPECT_TRUE(wpi::starts_with(out, "	timing_test: 1.5"));
}
<filename>Code-Sleep-Python/speech-to-text/stt.py<gh_stars>100-1000
import speech_recognition as sr

r = sr.Recognizer()
flag = 1
while flag == 1:
    with sr.Microphone() as source:
        print("Say something... ")
        audio = r.listen(source)
    try:
        print("Google thinks you said: {}".format(r.recognize_google(audio)))
        flag = 0
    except sr.UnknownValueError:
        print("Couldn't understand your voice. Please speak again.")
    except sr.RequestError as e:
        print("Couldn't request results. Please speak again.; {}".format(e))
<filename>src/Functions/FunctionsStringHash.cpp #include <Functions/FunctionsStringHash.h> #include <Functions/ExtractString.h> #include <Functions/FunctionFactory.h> #include <Functions/FunctionsHashing.h> #include <Common/PODArray.h> #include <Core/Defines.h> #include <functional> #include <tuple> #include <vector> #include <base/unaligned.h> #include <city.h> namespace DB { namespace ErrorCodes { extern const int NOT_IMPLEMENTED; } struct BytesRef { const UInt8 * data; size_t size; }; struct Hash { static UInt64 crc32u64(UInt64 crc [[maybe_unused]], UInt64 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u64(crc, val); #elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) return __crc32cd(crc, val); #else throw Exception("String hash is not implemented without sse4.2 support", ErrorCodes::NOT_IMPLEMENTED); #endif } static UInt64 crc32u32(UInt64 crc [[maybe_unused]], UInt32 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u32(crc, val); #elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) return __crc32cw(crc, val); #else throw Exception("String hash is not implemented without sse4.2 support", ErrorCodes::NOT_IMPLEMENTED); #endif } static UInt64 crc32u16(UInt64 crc [[maybe_unused]], UInt16 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u16(crc, val); #elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) return __crc32ch(crc, val); #else throw Exception("String hash is not implemented without sse4.2 support", ErrorCodes::NOT_IMPLEMENTED); #endif } static UInt64 crc32u8(UInt64 crc [[maybe_unused]], UInt8 val [[maybe_unused]]) { #ifdef __SSE4_2__ return _mm_crc32_u8(crc, val); #elif defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) return __crc32cb(crc, val); #else throw Exception("String hash is not implemented without sse4.2 support", ErrorCodes::NOT_IMPLEMENTED); #endif } template <bool CaseInsensitive> static ALWAYS_INLINE inline UInt64 shingleHash(UInt64 crc, const UInt8 * start, size_t size) { if (size & 1) { UInt8 x = *start; if constexpr (CaseInsensitive) x |= 0x20u; /// see toLowerIfAlphaASCII from StringUtils.h crc = crc32u8(crc, x); --size; ++start; } if (size & 2) { UInt16 x = unalignedLoad<UInt16>(start); if constexpr (CaseInsensitive) x |= 0x2020u; crc = crc32u16(crc, x); size -= 2; start += 2; } if (size & 4) { UInt32 x = unalignedLoad<UInt32>(start); if constexpr (CaseInsensitive) x |= 0x20202020u; crc = crc32u32(crc, x); size -= 4; start += 4; } while (size) { UInt64 x = unalignedLoad<UInt64>(start); if constexpr (CaseInsensitive) x |= 0x2020202020202020u; crc = crc32u64(crc, x); size -= 8; start += 8; } return crc; } template <bool CaseInsensitive> static ALWAYS_INLINE inline UInt64 shingleHash(const std::vector<BytesRef> & shingle, size_t offset = 0) { UInt64 crc = -1ULL; for (size_t i = offset; i < shingle.size(); ++i) crc = shingleHash<CaseInsensitive>(crc, shingle[i].data, shingle[i].size); for (size_t i = 0; i < offset; ++i) crc = shingleHash<CaseInsensitive>(crc, shingle[i].data, shingle[i].size); return crc; } }; // SimHash String -> UInt64 // UTF8: means ASCII or UTF8, these two parameters CodePoint and UTF8 can only be (UInt8, false) or (UInt32, true) // Ngram: means ngram(true) or words shingles(false) // CaseInsensitive: means should we consider about letter case or not template <bool UTF8, bool Ngram, bool CaseInsensitive> struct SimHashImpl { static constexpr size_t min_word_size = 4; /// Update fingerprint according to hash_value bits. 
static ALWAYS_INLINE inline void updateFingerVector(Int64 * finger_vec, UInt64 hash_value) { for (size_t i = 0; i < 64; ++i) finger_vec[i] += (hash_value & (1ULL << i)) ? 1 : -1; } /// Return a 64 bit value according to finger_vec. static ALWAYS_INLINE inline UInt64 getSimHash(const Int64 * finger_vec) { UInt64 res = 0; for (size_t i = 0; i < 64; ++i) if (finger_vec[i] > 0) res |= (1ULL << i); return res; } // SimHash ngram calculate function: String -> UInt64 // this function extracting ngram from input string, and maintain a 64-dimensions vector // for each ngram, calculate a 64 bit hash value, and update the vector according the hash value // finally return a 64 bit value(UInt64), i'th bit is 1 means vector[i] > 0, otherwise, vector[i] < 0 static ALWAYS_INLINE inline UInt64 ngramHashASCII(const UInt8 * data, size_t size, size_t shingle_size) { if (size < shingle_size) return Hash::shingleHash<CaseInsensitive>(-1ULL, data, size); Int64 finger_vec[64] = {}; const UInt8 * end = data + size; for (const UInt8 * pos = data; pos + shingle_size <= end; ++pos) { UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(-1ULL, pos, shingle_size); updateFingerVector(finger_vec, hash_value); } return getSimHash(finger_vec); } static ALWAYS_INLINE inline UInt64 ngramHashUTF8(const UInt8 * data, size_t size, size_t shingle_size) { const UInt8 * start = data; const UInt8 * end = data + size; const UInt8 * word_start = start; const UInt8 * word_end = start; for (size_t i = 0; i < shingle_size; ++i) { if (word_end >= end) return Hash::shingleHash<CaseInsensitive>(-1ULL, data, size); ExtractStringImpl::readOneUTF8Code(word_end, end); } Int64 finger_vec[64] = {}; while (word_end < end) { ExtractStringImpl::readOneUTF8Code(word_start, word_end); ExtractStringImpl::readOneUTF8Code(word_end, end); size_t length = word_end - word_start; UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(-1ULL, word_start, length); updateFingerVector(finger_vec, hash_value); } return getSimHash(finger_vec); } // SimHash word shingle calculate function: String -> UInt64 // this function extracting n word shingle from input string, and maintain a 64-dimensions vector as well // for each word shingle, calculate a 64 bit hash value, and update the vector according the hash value // finally return a 64 bit value(UInt64), i'th bit is 1 means vector[i] > 0, otherwise, vector[i] < 0 // // word shingle hash value calculate: // 1. at the first, extracts N word shingles and calculate N hash values, store into an array, use this N hash values // to calculate the first word shingle hash value // 2. next, we extract one word each time, and calculate a new hash value of the new word,then use the latest N hash // values to calculate the next word shingle hash value static ALWAYS_INLINE inline UInt64 wordShingleHash(const UInt8 * data, size_t size, size_t shingle_size) { const UInt8 * start = data; const UInt8 * end = data + size; // A 64 bit vector initialized to zero. Int64 finger_vec[64] = {}; // An array to store N words. 
std::vector<BytesRef> words; words.reserve(shingle_size); // get first word shingle while (start < end && words.size() < shingle_size) { const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length >= min_word_size) words.emplace_back(BytesRef{word_start, length}); } if (words.empty()) return 0; UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(words); updateFingerVector(finger_vec, hash_value); size_t offset = 0; while (start < end) { const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length < min_word_size) continue; // we need to store the new word hash value to the oldest location. // for example, N = 5, array |a0|a1|a2|a3|a4|, now , a0 is the oldest location, // so we need to store new word hash into location of a0, then ,this array become // |a5|a1|a2|a3|a4|, next time, a1 become the oldest location, we need to store new // word hash value into location of a1, then array become |a5|a6|a2|a3|a4| words[offset] = BytesRef{word_start, length}; ++offset; if (offset >= shingle_size) offset = 0; // according to the word hash storation way, in order to not lose the word shingle's // sequence information, when calculation word shingle hash value, we need provide the offset // information, which is the offset of the first word's hash value of the word shingle hash_value = Hash::shingleHash<CaseInsensitive>(words, offset); updateFingerVector(finger_vec, hash_value); } return getSimHash(finger_vec); } static void apply(const ColumnString::Chars & data, const ColumnString::Offsets & offsets, size_t shingle_size, PaddedPODArray<UInt64> & res) { for (size_t i = 0; i < offsets.size(); ++i) { const UInt8 * one_data = &data[offsets[i - 1]]; const size_t data_size = offsets[i] - offsets[i - 1] - 1; if constexpr (Ngram) { if constexpr (!UTF8) res[i] = ngramHashASCII(one_data, data_size, shingle_size); else res[i] = ngramHashUTF8(one_data, data_size, shingle_size); } else { res[i] = wordShingleHash(one_data, data_size, shingle_size); } } } }; // MinHash: String -> Tuple(UInt64, UInt64) // for each string, we extract ngram or word shingle, // for each ngram or word shingle, calculate a hash value, // then we take the K minimum hash values to calculate a hashsum, // and take the K maximum hash values to calculate another hashsum, // return this two hashsum: Tuple(hashsum1, hashsum2) // // UTF8: means ASCII or UTF8, these two parameters CodePoint and UTF8 can only be (UInt8, false) or (UInt32, true) // Ngram: means ngram(true) or words shingles(false) // CaseInsensitive: means should we consider about letter case or not template <bool UTF8, bool Ngram, bool CaseInsensitive> struct MinHashImpl { static constexpr size_t min_word_size = 4; template <typename Comp> struct Heap { void update(UInt64 hash, BytesRef ref, size_t limit) { if (values.count(hash)) return; values[hash] = ref; if (values.size() > limit) values.erase(values.begin()); } UInt64 getHash() { if (values.empty()) return 0; UInt64 res = 0; for (auto it = values.begin(); it != values.end(); ++it) res = CityHash_v1_0_2::Hash128to64(CityHash_v1_0_2::uint128(res, it->first)); return res; } void fill(ColumnTuple & strings) { auto it = values.begin(); for (size_t i = 0; i < strings.tupleSize(); ++i) { auto & col_string = static_cast<ColumnString &>(strings.getColumn(i)); if (it != values.end()) { col_string.insertData(reinterpret_cast<const char *>(it->second.data), it->second.size); ++it; } else col_string.insertDefault(); } } 
std::map<UInt64, BytesRef, Comp> values; }; using MaxHeap = Heap<std::less<size_t>>; using MinHeap = Heap<std::greater<size_t>>; static ALWAYS_INLINE inline void ngramHashASCII( MinHeap & min_heap, MaxHeap & max_heap, const UInt8 * data, size_t size, size_t shingle_size, size_t heap_size) { if (size < shingle_size) { UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(-1ULL, data, size); min_heap.update(hash_value, BytesRef{data, size}, heap_size); max_heap.update(hash_value, BytesRef{data, size}, heap_size); return; } const UInt8 * end = data + size; for (const UInt8 * pos = data; pos + shingle_size <= end; ++pos) { UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(-1ULL, pos, shingle_size); // insert the new hash value into array used to store K minimum value // and K maximum value min_heap.update(hash_value, BytesRef{pos, shingle_size}, heap_size); max_heap.update(hash_value, BytesRef{pos, shingle_size}, heap_size); } } static ALWAYS_INLINE inline void ngramHashUTF8( MinHeap & min_heap, MaxHeap & max_heap, const UInt8 * data, size_t size, size_t shingle_size, size_t heap_size) { const UInt8 * start = data; const UInt8 * end = data + size; const UInt8 * word_start = start; const UInt8 * word_end = start; for (size_t i = 0; i < shingle_size; ++i) { if (word_end >= end) { auto hash_value = Hash::shingleHash<CaseInsensitive>(-1ULL, data, size); min_heap.update(hash_value, BytesRef{data, size}, heap_size); max_heap.update(hash_value, BytesRef{data, size}, heap_size); return; } ExtractStringImpl::readOneUTF8Code(word_end, end); } while (word_end < end) { ExtractStringImpl::readOneUTF8Code(word_start, word_end); ExtractStringImpl::readOneUTF8Code(word_end, end); size_t length = word_end - word_start; UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(-1ULL, word_start, length); min_heap.update(hash_value, BytesRef{word_start, length}, heap_size); max_heap.update(hash_value, BytesRef{word_start, length}, heap_size); } } // MinHash word shingle hash value calculate function: String ->Tuple(UInt64, UInt64) // for each word shingle, we calculate a hash value, but in fact, we just maintain the // K minimum and K maximum hash value static ALWAYS_INLINE inline void wordShingleHash( MinHeap & min_heap, MaxHeap & max_heap, const UInt8 * data, size_t size, size_t shingle_size, size_t heap_size) { const UInt8 * start = data; const UInt8 * end = data + size; // An array to store N words. 
std::vector<BytesRef> words; words.reserve(shingle_size); // get first word shingle while (start < end && words.size() < shingle_size) { const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length >= min_word_size) words.emplace_back(BytesRef{word_start, length}); } if (words.empty()) return; UInt64 hash_value = Hash::shingleHash<CaseInsensitive>(words); { const UInt8 * shingle_start = words.front().data; const UInt8 * shingle_end = words.back().data + words.back().size; BytesRef ref{shingle_start, static_cast<size_t>(shingle_end - shingle_start)}; min_heap.update(hash_value, ref, heap_size); max_heap.update(hash_value, ref, heap_size); } size_t offset = 0; while (start < end) { const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length < min_word_size) continue; words[offset] = BytesRef{word_start, length}; const UInt8 * shingle_end = words[offset].data + length; ++offset; if (offset >= shingle_size) offset = 0; const UInt8 * shingle_start = words[offset].data; hash_value = Hash::shingleHash<CaseInsensitive>(words, offset); BytesRef ref{shingle_start, static_cast<size_t>(shingle_end - shingle_start)}; min_heap.update(hash_value, ref, heap_size); max_heap.update(hash_value, ref, heap_size); } } static void apply( const ColumnString::Chars & data, const ColumnString::Offsets & offsets, size_t shingle_size, size_t heap_size, PaddedPODArray<UInt64> * res1, PaddedPODArray<UInt64> * res2, ColumnTuple * res1_strings, ColumnTuple * res2_strings) { MinHeap min_heap; MaxHeap max_heap; for (size_t i = 0; i < offsets.size(); ++i) { const UInt8 * one_data = &data[offsets[i - 1]]; const size_t data_size = offsets[i] - offsets[i - 1] - 1; min_heap.values.clear(); max_heap.values.clear(); if constexpr (Ngram) { if constexpr (!UTF8) ngramHashASCII(min_heap, max_heap, one_data, data_size, shingle_size, heap_size); else ngramHashUTF8(min_heap, max_heap, one_data, data_size, shingle_size, heap_size); } else { wordShingleHash(min_heap, max_heap, one_data, data_size, shingle_size, heap_size); } if (res1) (*res1)[i] = min_heap.getHash(); if (res2) (*res2)[i] = max_heap.getHash(); if (res1_strings) min_heap.fill(*res1_strings); if (res2_strings) max_heap.fill(*res2_strings); } } }; struct NameNgramSimHash { static constexpr auto name = "ngramSimHash"; }; struct NameNgramSimHashCaseInsensitive { static constexpr auto name = "ngramSimHashCaseInsensitive"; }; struct NameNgramSimHashUTF8 { static constexpr auto name = "ngramSimHashUTF8"; }; struct NameNgramSimHashCaseInsensitiveUTF8 { static constexpr auto name = "ngramSimHashCaseInsensitiveUTF8"; }; struct NameWordShingleSimHash { static constexpr auto name = "wordShingleSimHash"; }; struct NameWordShingleSimHashCaseInsensitive { static constexpr auto name = "wordShingleSimHashCaseInsensitive"; }; struct NameWordShingleSimHashUTF8 { static constexpr auto name = "wordShingleSimHashUTF8"; }; struct NameWordShingleSimHashCaseInsensitiveUTF8 { static constexpr auto name = "wordShingleSimHashCaseInsensitiveUTF8"; }; struct NameNgramMinHash { static constexpr auto name = "ngramMinHash"; }; struct NameNgramMinHashCaseInsensitive { static constexpr auto name = "ngramMinHashCaseInsensitive"; }; struct NameNgramMinHashUTF8 { static constexpr auto name = "ngramMinHashUTF8"; }; struct NameNgramMinHashCaseInsensitiveUTF8 { static constexpr auto name = "ngramMinHashCaseInsensitiveUTF8"; }; struct NameWordShingleMinHash { static constexpr auto name = 
"wordShingleMinHash"; }; struct NameWordShingleMinHashCaseInsensitive { static constexpr auto name = "wordShingleMinHashCaseInsensitive"; }; struct NameWordShingleMinHashUTF8 { static constexpr auto name = "wordShingleMinHashUTF8"; }; struct NameWordShingleMinHashCaseInsensitiveUTF8 { static constexpr auto name = "wordShingleMinHashCaseInsensitiveUTF8"; }; struct NameNgramMinHashArg { static constexpr auto name = "ngramMinHashArg"; }; struct NameNgramMinHashArgCaseInsensitive { static constexpr auto name = "ngramMinHashArgCaseInsensitive"; }; struct NameNgramMinHashArgUTF8 { static constexpr auto name = "ngramMinHashArgUTF8"; }; struct NameNgramMinHashArgCaseInsensitiveUTF8 { static constexpr auto name = "ngramMinHashArgCaseInsensitiveUTF8"; }; struct NameWordShingleMinHashArg { static constexpr auto name = "wordShingleMinHashArg"; }; struct NameWordShingleMinHashArgCaseInsensitive { static constexpr auto name = "wordShingleMinHashArgCaseInsensitive"; }; struct NameWordShingleMinHashArgUTF8 { static constexpr auto name = "wordShingleMinHashArgUTF8"; }; struct NameWordShingleMinHashArgCaseInsensitiveUTF8 { static constexpr auto name = "wordShingleMinHashArgCaseInsensitiveUTF8"; }; // SimHash using FunctionNgramSimHash = FunctionsStringHash<SimHashImpl<false, true, false>, NameNgramSimHash, true>; using FunctionNgramSimHashCaseInsensitive = FunctionsStringHash<SimHashImpl<false, true, true>, NameNgramSimHashCaseInsensitive, true>; using FunctionNgramSimHashUTF8 = FunctionsStringHash<SimHashImpl<true, true, false>, NameNgramSimHashUTF8, true>; using FunctionNgramSimHashCaseInsensitiveUTF8 = FunctionsStringHash<SimHashImpl<true, true, true>, NameNgramSimHashCaseInsensitiveUTF8, true>; using FunctionWordShingleSimHash = FunctionsStringHash<SimHashImpl<false, false, false>, NameWordShingleSimHash, true>; using FunctionWordShingleSimHashCaseInsensitive = FunctionsStringHash<SimHashImpl<false, false, true>, NameWordShingleSimHashCaseInsensitive, true>; using FunctionWordShingleSimHashUTF8 = FunctionsStringHash<SimHashImpl<true, false, false>, NameWordShingleSimHashUTF8, true>; using FunctionWordShingleSimHashCaseInsensitiveUTF8 = FunctionsStringHash<SimHashImpl<true, false, true>, NameWordShingleSimHashCaseInsensitiveUTF8, true>; // MinHash using FunctionNgramMinHash = FunctionsStringHash<MinHashImpl<false, true, false>, NameNgramMinHash, false>; using FunctionNgramMinHashCaseInsensitive = FunctionsStringHash<MinHashImpl<false, true, true>, NameNgramMinHashCaseInsensitive, false>; using FunctionNgramMinHashUTF8 = FunctionsStringHash<MinHashImpl<true, true, false>, NameNgramMinHashUTF8, false>; using FunctionNgramMinHashCaseInsensitiveUTF8 = FunctionsStringHash<MinHashImpl<true, true, true>, NameNgramMinHashCaseInsensitiveUTF8, false>; using FunctionWordShingleMinHash = FunctionsStringHash<MinHashImpl<false, false, false>, NameWordShingleMinHash, false>; using FunctionWordShingleMinHashCaseInsensitive = FunctionsStringHash<MinHashImpl<false, false, true>, NameWordShingleMinHashCaseInsensitive, false>; using FunctionWordShingleMinHashUTF8 = FunctionsStringHash<MinHashImpl<true, false, false>, NameWordShingleMinHashUTF8, false>; using FunctionWordShingleMinHashCaseInsensitiveUTF8 = FunctionsStringHash<MinHashImpl<true, false, true>, NameWordShingleMinHashCaseInsensitiveUTF8, false>; // MinHasArg using FunctionNgramMinHashArg = FunctionsStringHash<MinHashImpl<false, true, false>, NameNgramMinHashArg, false, true>; using FunctionNgramMinHashArgCaseInsensitive = FunctionsStringHash<MinHashImpl<false, 
true, true>, NameNgramMinHashArgCaseInsensitive, false, true>; using FunctionNgramMinHashArgUTF8 = FunctionsStringHash<MinHashImpl<true, true, false>, NameNgramMinHashArgUTF8, false, true>; using FunctionNgramMinHashArgCaseInsensitiveUTF8 = FunctionsStringHash<MinHashImpl<true, true, true>, NameNgramMinHashArgCaseInsensitiveUTF8, false, true>; using FunctionWordShingleMinHashArg = FunctionsStringHash<MinHashImpl<false, false, false>, NameWordShingleMinHashArg, false, true>; using FunctionWordShingleMinHashArgCaseInsensitive = FunctionsStringHash<MinHashImpl<false, false, true>, NameWordShingleMinHashArgCaseInsensitive, false, true>; using FunctionWordShingleMinHashArgUTF8 = FunctionsStringHash<MinHashImpl<true, false, false>, NameWordShingleMinHashArgUTF8, false, true>; using FunctionWordShingleMinHashArgCaseInsensitiveUTF8 = FunctionsStringHash<MinHashImpl<true, false, true>, NameWordShingleMinHashArgCaseInsensitiveUTF8, false, true>; void registerFunctionsStringHash(FunctionFactory & factory) { factory.registerFunction<FunctionNgramSimHash>(); factory.registerFunction<FunctionNgramSimHashCaseInsensitive>(); factory.registerFunction<FunctionNgramSimHashUTF8>(); factory.registerFunction<FunctionNgramSimHashCaseInsensitiveUTF8>(); factory.registerFunction<FunctionWordShingleSimHash>(); factory.registerFunction<FunctionWordShingleSimHashCaseInsensitive>(); factory.registerFunction<FunctionWordShingleSimHashUTF8>(); factory.registerFunction<FunctionWordShingleSimHashCaseInsensitiveUTF8>(); factory.registerFunction<FunctionNgramMinHash>(); factory.registerFunction<FunctionNgramMinHashCaseInsensitive>(); factory.registerFunction<FunctionNgramMinHashUTF8>(); factory.registerFunction<FunctionNgramMinHashCaseInsensitiveUTF8>(); factory.registerFunction<FunctionWordShingleMinHash>(); factory.registerFunction<FunctionWordShingleMinHashCaseInsensitive>(); factory.registerFunction<FunctionWordShingleMinHashUTF8>(); factory.registerFunction<FunctionWordShingleMinHashCaseInsensitiveUTF8>(); factory.registerFunction<FunctionNgramMinHashArg>(); factory.registerFunction<FunctionNgramMinHashArgCaseInsensitive>(); factory.registerFunction<FunctionNgramMinHashArgUTF8>(); factory.registerFunction<FunctionNgramMinHashArgCaseInsensitiveUTF8>(); factory.registerFunction<FunctionWordShingleMinHashArg>(); factory.registerFunction<FunctionWordShingleMinHashArgCaseInsensitive>(); factory.registerFunction<FunctionWordShingleMinHashArgUTF8>(); factory.registerFunction<FunctionWordShingleMinHashArgCaseInsensitiveUTF8>(); } }
11,115
431
<reponame>lucianoconforto/CPPWebFramework /* Copyright 2017 <NAME> and <NAME> Distributed under MIT license, or public domain if desired and recognized in your jurisdiction. See file LICENSE for detail. */ #include <cwf/cppwebapplication.h> #include <controllers/bmicontroller.h> int main(int argc, char *argv[]) { CWF::CppWebApplication server(argc, argv, "/home/herik/CPPWebFramework/examples/WebServices/server/"); server.addController<BmiController>("/bmi"); return server.start(); }
202
764
{"symbol": "ELI","address": "0xc7C03B8a3FC5719066E185ea616e87B88eba44a3","overview":{"en": ""},"email": "<EMAIL>","website": "https://www.eligma.io/","state": "NORMAL","links": {"blog": "https://medium.com/eligma-blog","twitter": "https://twitter.com/eligmacom?lang=en","telegram": "https://t.me/eligma","github": "https://github.com/Eligma/"}}
134
1,063
from django.contrib import admin from . models import CyclecountModeDayModel admin.site.register(CyclecountModeDayModel)
34
313
<gh_stars>100-1000 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from libcst.codemod.commands.noop import NOOPCommand from libcst.testing.utils import UnitTest from fixit.cli.args import get_rule_parser class SomeFakeRule: pass class LintRuleCLIArgsTest(UnitTest): def test_rule_parser(self) -> None: parser = get_rule_parser().parse_args( ["fixit.cli.tests.test_args.SomeFakeRule"] ) self.assertEqual(parser.rule, SomeFakeRule) def test_rule_parser_external_module(self) -> None: # External modules work, as long as they are a dependency parser = get_rule_parser().parse_args( ["libcst.codemod.commands.noop.NOOPCommand"] ) self.assertEqual(parser.rule, NOOPCommand)
342
1,603
<filename>metadata-ingestion/tests/unit/test_transform_dataset.py import re from typing import Any, Dict, List, MutableSequence, Optional, Union from unittest import mock from uuid import uuid4 import pytest import datahub.emitter.mce_builder as builder import datahub.metadata.schema_classes as models import tests.test_helpers.mce_helpers from datahub.emitter.mcp import MetadataChangeProposalWrapper from datahub.ingestion.api import workunit from datahub.ingestion.api.common import EndOfStream, PipelineContext, RecordEnvelope from datahub.ingestion.run.pipeline import Pipeline from datahub.ingestion.transformer.add_dataset_browse_path import ( AddDatasetBrowsePathTransformer, ) from datahub.ingestion.transformer.add_dataset_ownership import ( AddDatasetOwnership, PatternAddDatasetOwnership, SimpleAddDatasetOwnership, ) from datahub.ingestion.transformer.add_dataset_properties import ( AddDatasetProperties, AddDatasetPropertiesResolverBase, SimpleAddDatasetProperties, ) from datahub.ingestion.transformer.add_dataset_schema_tags import ( PatternAddDatasetSchemaTags, ) from datahub.ingestion.transformer.add_dataset_schema_terms import ( PatternAddDatasetSchemaTerms, ) from datahub.ingestion.transformer.add_dataset_tags import ( AddDatasetTags, PatternAddDatasetTags, SimpleAddDatasetTags, ) from datahub.ingestion.transformer.add_dataset_terms import ( PatternAddDatasetTerms, SimpleAddDatasetTerms, ) from datahub.ingestion.transformer.base_transformer import ( BaseTransformer, SingleAspectTransformer, ) from datahub.ingestion.transformer.dataset_transformer import DatasetTransformer from datahub.ingestion.transformer.mark_dataset_status import MarkDatasetStatus from datahub.ingestion.transformer.remove_dataset_ownership import ( SimpleRemoveDatasetOwnership, ) from datahub.metadata.schema_classes import ( BrowsePathsClass, ChangeTypeClass, DatasetPropertiesClass, DatasetSnapshotClass, GlobalTagsClass, MetadataChangeEventClass, OwnershipClass, StatusClass, TagAssociationClass, ) from datahub.utilities.urns.dataset_urn import DatasetUrn from datahub.utilities.urns.urn import Urn def make_generic_dataset( entity_urn: str = "urn:li:dataset:(urn:li:dataPlatform:bigquery,example1,PROD)", aspects: List[Any] = [models.StatusClass(removed=False)], ) -> models.MetadataChangeEventClass: return models.MetadataChangeEventClass( proposedSnapshot=models.DatasetSnapshotClass( urn=entity_urn, aspects=aspects, ), ) def make_generic_dataset_mcp( entity_urn: str = "urn:li:dataset:(urn:li:dataPlatform:bigquery,example1,PROD)", aspect_name: str = "status", aspect: Any = models.StatusClass(removed=False), ) -> MetadataChangeProposalWrapper: return MetadataChangeProposalWrapper( entityUrn=entity_urn, entityType=Urn.create_from_string(entity_urn).get_type(), aspectName=aspect_name, changeType="UPSERT", aspect=aspect, ) def create_and_run_test_pipeline( events: List[Union[MetadataChangeEventClass, MetadataChangeProposalWrapper]], transformers: List[Dict[str, Any]], path: str, ) -> str: with mock.patch( "tests.unit.test_source.FakeSource.get_workunits" ) as mock_getworkunits: mock_getworkunits.return_value = [ workunit.MetadataWorkUnit( id=f"test-workunit-mce-{e.proposedSnapshot.urn}", mce=e ) if isinstance(e, MetadataChangeEventClass) else workunit.MetadataWorkUnit( id=f"test-workunit-mcp-{e.entityUrn}-{e.aspectName}", mcp=e ) for e in events ] events_file = f"{path}/{str(uuid4())}.json" pipeline = Pipeline.create( config_dict={ "source": { "type": "tests.unit.test_source.FakeSource", "config": {}, }, "transformers": 
transformers, "sink": {"type": "file", "config": {"filename": events_file}}, } ) pipeline.run() pipeline.raise_from_status() return events_file def make_dataset_with_owner() -> models.MetadataChangeEventClass: return models.MetadataChangeEventClass( proposedSnapshot=models.DatasetSnapshotClass( urn="urn:li:dataset:(urn:li:dataPlatform:bigquery,example2,PROD)", aspects=[ models.OwnershipClass( owners=[ models.OwnerClass( owner=builder.make_user_urn("fake_owner"), type=models.OwnershipTypeClass.DATAOWNER, ), ], lastModified=models.AuditStampClass( time=1625266033123, actor="urn:li:corpuser:datahub" ), ) ], ), ) EXISTING_PROPERTIES = {"my_existing_property": "existing property value"} def make_dataset_with_properties() -> models.MetadataChangeEventClass: return models.MetadataChangeEventClass( proposedSnapshot=models.DatasetSnapshotClass( urn="urn:li:dataset:(urn:li:dataPlatform:bigquery,example1,PROD)", aspects=[ models.StatusClass(removed=False), models.DatasetPropertiesClass( customProperties=EXISTING_PROPERTIES.copy() ), ], ), ) def test_simple_dataset_ownership_transformation(mock_time): no_owner_aspect = make_generic_dataset() with_owner_aspect = make_dataset_with_owner() not_a_dataset = models.MetadataChangeEventClass( proposedSnapshot=models.DataJobSnapshotClass( urn="urn:li:dataJob:(urn:li:dataFlow:(airflow,dag_abc,PROD),task_456)", aspects=[ models.DataJobInfoClass( name="User Deletions", description="Constructs the fct_users_deleted from logging_events", type=models.AzkabanJobTypeClass.SQL, ) ], ) ) inputs = [no_owner_aspect, with_owner_aspect, not_a_dataset, EndOfStream()] transformer = SimpleAddDatasetOwnership.create( { "owner_urns": [ builder.make_user_urn("person1"), builder.make_user_urn("person2"), ] }, PipelineContext(run_id="test"), ) outputs = list( transformer.transform([RecordEnvelope(input, metadata={}) for input in inputs]) ) assert len(outputs) == len(inputs) + 1 # Check the first entry. first_ownership_aspect = builder.get_aspect_if_available( outputs[0].record, models.OwnershipClass ) assert first_ownership_aspect is None last_event = outputs[3].record assert isinstance(last_event, MetadataChangeProposalWrapper) assert isinstance(last_event.aspect, OwnershipClass) assert len(last_event.aspect.owners) == 2 assert last_event.entityUrn == outputs[0].record.proposedSnapshot.urn assert all( [ owner.type == models.OwnershipTypeClass.DATAOWNER for owner in last_event.aspect.owners ] ) # Check the second entry. second_ownership_aspect = builder.get_aspect_if_available( outputs[1].record, models.OwnershipClass ) assert second_ownership_aspect assert len(second_ownership_aspect.owners) == 3 assert all( [ owner.type == models.OwnershipTypeClass.DATAOWNER for owner in second_ownership_aspect.owners ] ) # Verify that the third entry is unchanged. 
assert inputs[2] == outputs[2].record # Verify that the last entry is EndOfStream assert inputs[3] == outputs[4].record def test_simple_dataset_ownership_with_type_transformation(mock_time): input = make_generic_dataset() transformer = SimpleAddDatasetOwnership.create( { "owner_urns": [ builder.make_user_urn("person1"), ], "ownership_type": "PRODUCER", }, PipelineContext(run_id="test"), ) output = list( transformer.transform( [ RecordEnvelope(input, metadata={}), RecordEnvelope(EndOfStream(), metadata={}), ] ) ) assert len(output) == 3 # original MCE is unchanged assert input == output[0].record ownership_aspect = output[1].record.aspect assert isinstance(ownership_aspect, OwnershipClass) assert len(ownership_aspect.owners) == 1 assert ownership_aspect.owners[0].type == models.OwnershipTypeClass.PRODUCER def test_simple_dataset_ownership_with_invalid_type_transformation(mock_time): with pytest.raises(ValueError): SimpleAddDatasetOwnership.create( { "owner_urns": [ builder.make_user_urn("person1"), ], "ownership_type": "INVALID_TYPE", }, PipelineContext(run_id="test"), ) def test_simple_remove_dataset_ownership(): with_owner_aspect = make_dataset_with_owner() transformer = SimpleRemoveDatasetOwnership.create( {}, PipelineContext(run_id="test"), ) outputs = list( transformer.transform([RecordEnvelope(with_owner_aspect, metadata={})]) ) ownership_aspect = builder.get_aspect_if_available( outputs[0].record, models.OwnershipClass ) assert ownership_aspect assert len(ownership_aspect.owners) == 0 def test_mark_status_dataset(tmp_path): dataset = make_generic_dataset() transformer = MarkDatasetStatus.create( {"removed": True}, PipelineContext(run_id="test"), ) removed = list( transformer.transform( [ RecordEnvelope(dataset, metadata={}), ] ) ) assert len(removed) == 1 status_aspect = builder.get_aspect_if_available( removed[0].record, models.StatusClass ) assert status_aspect assert status_aspect.removed is True transformer = MarkDatasetStatus.create( {"removed": False}, PipelineContext(run_id="test"), ) not_removed = list( transformer.transform( [ RecordEnvelope(dataset, metadata={}), ] ) ) assert len(not_removed) == 1 status_aspect = builder.get_aspect_if_available( not_removed[0].record, models.StatusClass ) assert status_aspect assert status_aspect.removed is False mcp = make_generic_dataset_mcp( aspect_name="datasetProperties", aspect=DatasetPropertiesClass(description="Test dataset"), ) events_file = create_and_run_test_pipeline( events=[mcp], transformers=[{"type": "mark_dataset_status", "config": {"removed": True}}], path=tmp_path, ) # assert dataset properties aspect was preserved assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="datasetProperties", aspect_field_matcher={"description": "Test dataset"}, file=events_file, ) == 1 ) # assert Status aspect was generated assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="status", aspect_field_matcher={"removed": True}, file=events_file, ) == 1 ) # MCE only test_aspect = DatasetPropertiesClass(description="Test dataset") events_file = create_and_run_test_pipeline( events=[make_generic_dataset(aspects=[test_aspect])], transformers=[{"type": "mark_dataset_status", "config": {"removed": True}}], path=tmp_path, ) # assert dataset properties aspect was preserved assert ( tests.test_helpers.mce_helpers.assert_entity_mce_aspect( entity_urn=mcp.entityUrn or "", aspect=test_aspect, aspect_type=DatasetPropertiesClass, file=events_file, ) == 1 ) # assert 
Status aspect was generated assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="status", aspect_field_matcher={"removed": True}, file=events_file, ) == 1 ) # MCE (non-matching) + MCP (matching) test_aspect = DatasetPropertiesClass(description="Test dataset") events_file = create_and_run_test_pipeline( events=[ make_generic_dataset(aspects=[test_aspect]), make_generic_dataset_mcp(), ], transformers=[{"type": "mark_dataset_status", "config": {"removed": True}}], path=tmp_path, ) # assert dataset properties aspect was preserved assert ( tests.test_helpers.mce_helpers.assert_entity_mce_aspect( entity_urn=mcp.entityUrn or "", aspect=test_aspect, aspect_type=DatasetPropertiesClass, file=events_file, ) == 1 ) # assert Status aspect was generated assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="status", aspect_field_matcher={"removed": True}, file=events_file, ) == 1 ) # MCE (matching) + MCP (non-matching) test_status_aspect = StatusClass(removed=False) events_file = create_and_run_test_pipeline( events=[ make_generic_dataset(aspects=[test_status_aspect]), make_generic_dataset_mcp( aspect_name="datasetProperties", aspect=DatasetPropertiesClass(description="test dataset"), ), ], transformers=[{"type": "mark_dataset_status", "config": {"removed": True}}], path=tmp_path, ) # assert MCE was transformed assert ( tests.test_helpers.mce_helpers.assert_entity_mce_aspect( entity_urn=mcp.entityUrn or "", aspect=StatusClass(removed=True), aspect_type=StatusClass, file=events_file, ) == 1 ) # assert MCP aspect was preserved assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="datasetProperties", aspect_field_matcher={"description": "test dataset"}, file=events_file, ) == 1 ) # MCE (non-matching) + MCP (non-matching) test_mcp_aspect = GlobalTagsClass(tags=[TagAssociationClass(tag="urn:li:tag:test")]) test_dataset_props_aspect = DatasetPropertiesClass(description="Test dataset") events_file = create_and_run_test_pipeline( events=[ make_generic_dataset(aspects=[test_dataset_props_aspect]), make_generic_dataset_mcp(aspect_name="globalTags", aspect=test_mcp_aspect), ], transformers=[{"type": "mark_dataset_status", "config": {"removed": True}}], path=tmp_path, ) # assert MCE was preserved assert ( tests.test_helpers.mce_helpers.assert_entity_mce_aspect( entity_urn=mcp.entityUrn or "", aspect=test_dataset_props_aspect, aspect_type=DatasetPropertiesClass, file=events_file, ) == 1 ) # assert MCP aspect was preserved assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="globalTags", aspect_field_matcher={"tags": [{"tag": "urn:li:tag:test"}]}, file=events_file, ) == 1 ) # assert MCP Status aspect was generated assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="status", aspect_field_matcher={"removed": True}, file=events_file, ) == 1 ) def test_add_dataset_browse_paths(): dataset = make_generic_dataset() transformer = AddDatasetBrowsePathTransformer.create( {"path_templates": ["/abc"]}, PipelineContext(run_id="test"), ) transformed = list( transformer.transform( [ RecordEnvelope(dataset, metadata={}), RecordEnvelope(EndOfStream(), metadata={}), ] ) ) browse_path_aspect = transformed[1].record.aspect assert browse_path_aspect assert browse_path_aspect.paths == ["/abc"] # use an mce with a pre-existing browse path dataset_mce = make_generic_dataset( aspects=[StatusClass(removed=False), 
browse_path_aspect] ) transformer = AddDatasetBrowsePathTransformer.create( { "path_templates": [ "/PLATFORM/foo/DATASET_PARTS/ENV", "/ENV/PLATFORM/bar/DATASET_PARTS/", ] }, PipelineContext(run_id="test"), ) transformed = list( transformer.transform( [ RecordEnvelope(dataset_mce, metadata={}), RecordEnvelope(EndOfStream(), metadata={}), ] ) ) assert len(transformed) == 2 browse_path_aspect = builder.get_aspect_if_available( transformed[0].record, BrowsePathsClass ) assert browse_path_aspect assert browse_path_aspect.paths == [ "/abc", "/bigquery/foo/example1/prod", "/prod/bigquery/bar/example1/", ] transformer = AddDatasetBrowsePathTransformer.create( { "path_templates": [ "/xyz", ], "replace_existing": True, }, PipelineContext(run_id="test"), ) transformed = list( transformer.transform( [ RecordEnvelope(dataset_mce, metadata={}), RecordEnvelope(EndOfStream(), metadata={}), ] ) ) assert len(transformed) == 2 browse_path_aspect = builder.get_aspect_if_available( transformed[0].record, BrowsePathsClass ) assert browse_path_aspect assert browse_path_aspect.paths == [ "/xyz", ] def test_simple_dataset_tags_transformation(mock_time): dataset_mce = make_generic_dataset() transformer = SimpleAddDatasetTags.create( { "tag_urns": [ builder.make_tag_urn("NeedsDocumentation"), builder.make_tag_urn("Legacy"), ] }, PipelineContext(run_id="test-tags"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, EndOfStream()] ] ) ) assert len(outputs) == 3 # Check that tags were added. tags_aspect = outputs[1].record.aspect assert tags_aspect assert len(tags_aspect.tags) == 2 assert tags_aspect.tags[0].tag == builder.make_tag_urn("NeedsDocumentation") def dummy_tag_resolver_method(dataset_snapshot): return [] def test_pattern_dataset_tags_transformation(mock_time): dataset_mce = make_generic_dataset() transformer = PatternAddDatasetTags.create( { "tag_pattern": { "rules": { ".*example1.*": [ builder.make_tag_urn("Private"), builder.make_tag_urn("Legacy"), ], ".*example2.*": [builder.make_term_urn("Needs Documentation")], } }, }, PipelineContext(run_id="test-tags"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, EndOfStream()] ] ) ) assert len(outputs) == 3 tags_aspect = outputs[1].record.aspect assert tags_aspect assert len(tags_aspect.tags) == 2 assert tags_aspect.tags[0].tag == builder.make_tag_urn("Private") assert builder.make_tag_urn("Needs Documentation") not in tags_aspect.tags def test_import_resolver(): transformer = AddDatasetTags.create( { "get_tags_to_add": "tests.unit.test_transform_dataset.dummy_tag_resolver_method" }, PipelineContext(run_id="test-tags"), ) output = list( transformer.transform( [RecordEnvelope(input, metadata={}) for input in [make_generic_dataset()]] ) ) assert output def test_pattern_dataset_ownership_transformation(mock_time): no_owner_aspect = make_generic_dataset() with_owner_aspect = models.MetadataChangeEventClass( proposedSnapshot=models.DatasetSnapshotClass( urn="urn:li:dataset:(urn:li:dataPlatform:bigquery,example2,PROD)", aspects=[ models.OwnershipClass( owners=[ models.OwnerClass( owner=builder.make_user_urn("fake_owner"), type=models.OwnershipTypeClass.DATAOWNER, ), ], lastModified=models.AuditStampClass( time=1625266033123, actor="urn:li:corpuser:datahub" ), ) ], ), ) not_a_dataset = models.MetadataChangeEventClass( proposedSnapshot=models.DataJobSnapshotClass( urn="urn:li:dataJob:(urn:li:dataFlow:(airflow,dag_abc,PROD),task_456)", aspects=[ 
models.DataJobInfoClass( name="User Deletions", description="Constructs the fct_users_deleted from logging_events", type=models.AzkabanJobTypeClass.SQL, ) ], ) ) inputs = [no_owner_aspect, with_owner_aspect, not_a_dataset, EndOfStream()] transformer = PatternAddDatasetOwnership.create( { "owner_pattern": { "rules": { ".*example1.*": [builder.make_user_urn("person1")], ".*example2.*": [builder.make_user_urn("person2")], } }, }, PipelineContext(run_id="test"), ) outputs = list( transformer.transform([RecordEnvelope(input, metadata={}) for input in inputs]) ) assert len(outputs) == len(inputs) + 1 # additional MCP due to the no-owner MCE # Check the first entry. assert inputs[0] == outputs[0].record first_ownership_aspect = outputs[3].record.aspect assert first_ownership_aspect assert len(first_ownership_aspect.owners) == 1 assert all( [ owner.type == models.OwnershipTypeClass.DATAOWNER for owner in first_ownership_aspect.owners ] ) # Check the second entry. second_ownership_aspect = builder.get_aspect_if_available( outputs[1].record, models.OwnershipClass ) assert second_ownership_aspect assert len(second_ownership_aspect.owners) == 2 assert all( [ owner.type == models.OwnershipTypeClass.DATAOWNER for owner in second_ownership_aspect.owners ] ) # Verify that the third entry is unchanged. assert inputs[2] == outputs[2].record # Verify that the last entry is unchanged (EOS) assert inputs[-1] == outputs[-1].record def test_pattern_dataset_ownership_with_type_transformation(mock_time): input = make_generic_dataset() transformer = PatternAddDatasetOwnership.create( { "owner_pattern": { "rules": { ".*example1.*": [builder.make_user_urn("person1")], } }, "ownership_type": "PRODUCER", }, PipelineContext(run_id="test"), ) output = list( transformer.transform( [ RecordEnvelope(input, metadata={}), RecordEnvelope(EndOfStream(), metadata={}), ] ) ) assert len(output) == 3 ownership_aspect = output[1].record.aspect assert ownership_aspect assert len(ownership_aspect.owners) == 1 assert ownership_aspect.owners[0].type == models.OwnershipTypeClass.PRODUCER def test_pattern_dataset_ownership_with_invalid_type_transformation(mock_time): with pytest.raises(ValueError): PatternAddDatasetOwnership.create( { "owner_pattern": { "rules": { ".*example1.*": [builder.make_user_urn("person1")], } }, "ownership_type": "INVALID_TYPE", }, PipelineContext(run_id="test"), ) def gen_owners( owners: List[str], ownership_type: Union[ str, models.OwnershipTypeClass ] = models.OwnershipTypeClass.DATAOWNER, ) -> models.OwnershipClass: return models.OwnershipClass( owners=[models.OwnerClass(owner=owner, type=ownership_type) for owner in owners] ) def test_ownership_patching_intersect(mock_time): mock_graph = mock.MagicMock() server_ownership = gen_owners(["foo", "bar"]) mce_ownership = gen_owners(["baz", "foo"]) mock_graph.get_ownership.return_value = server_ownership test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", mce_ownership ) assert test_ownership and test_ownership.owners assert "foo" in [o.owner for o in test_ownership.owners] assert "bar" in [o.owner for o in test_ownership.owners] assert "baz" in [o.owner for o in test_ownership.owners] def test_ownership_patching_with_nones(mock_time): mock_graph = mock.MagicMock() mce_ownership = gen_owners(["baz", "foo"]) mock_graph.get_ownership.return_value = None test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", mce_ownership ) assert test_ownership and test_ownership.owners assert "foo" in [o.owner for o in 
test_ownership.owners] assert "baz" in [o.owner for o in test_ownership.owners] server_ownership = gen_owners(["baz", "foo"]) mock_graph.get_ownership.return_value = server_ownership test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", None ) assert not test_ownership def test_ownership_patching_with_empty_mce_none_server(mock_time): mock_graph = mock.MagicMock() mce_ownership = gen_owners([]) mock_graph.get_ownership.return_value = None test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", mce_ownership ) # nothing to add, so we omit writing assert test_ownership is None def test_ownership_patching_with_empty_mce_nonempty_server(mock_time): mock_graph = mock.MagicMock() server_ownership = gen_owners(["baz", "foo"]) mce_ownership = gen_owners([]) mock_graph.get_ownership.return_value = server_ownership test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", mce_ownership ) # nothing to add, so we omit writing assert test_ownership is None def test_ownership_patching_with_different_types_1(mock_time): mock_graph = mock.MagicMock() server_ownership = gen_owners(["baz", "foo"], models.OwnershipTypeClass.PRODUCER) mce_ownership = gen_owners(["foo"], models.OwnershipTypeClass.DATAOWNER) mock_graph.get_ownership.return_value = server_ownership test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", mce_ownership ) assert test_ownership and test_ownership.owners # nothing to add, so we omit writing assert ("foo", models.OwnershipTypeClass.DATAOWNER) in [ (o.owner, o.type) for o in test_ownership.owners ] assert ("baz", models.OwnershipTypeClass.PRODUCER) in [ (o.owner, o.type) for o in test_ownership.owners ] def test_ownership_patching_with_different_types_2(mock_time): mock_graph = mock.MagicMock() server_ownership = gen_owners(["baz", "foo"], models.OwnershipTypeClass.PRODUCER) mce_ownership = gen_owners(["foo", "baz"], models.OwnershipTypeClass.DATAOWNER) mock_graph.get_ownership.return_value = server_ownership test_ownership = AddDatasetOwnership.get_ownership_to_set( mock_graph, "test_urn", mce_ownership ) assert test_ownership and test_ownership.owners assert len(test_ownership.owners) == 2 # nothing to add, so we omit writing assert ("foo", models.OwnershipTypeClass.DATAOWNER) in [ (o.owner, o.type) for o in test_ownership.owners ] assert ("baz", models.OwnershipTypeClass.DATAOWNER) in [ (o.owner, o.type) for o in test_ownership.owners ] PROPERTIES_TO_ADD = {"my_new_property": "property value"} class DummyPropertiesResolverClass(AddDatasetPropertiesResolverBase): def get_properties_to_add(self, current: DatasetSnapshotClass) -> Dict[str, str]: return PROPERTIES_TO_ADD def test_add_dataset_properties(mock_time): dataset_mce = make_dataset_with_properties() transformer = AddDatasetProperties.create( { "add_properties_resolver_class": "tests.unit.test_transform_dataset.DummyPropertiesResolverClass" }, PipelineContext(run_id="test-properties"), ) outputs = list( transformer.transform( [RecordEnvelope(input, metadata={}) for input in [dataset_mce]] ) ) assert len(outputs) == 1 custom_properties = builder.get_aspect_if_available( outputs[0].record, models.DatasetPropertiesClass ) assert custom_properties is not None assert custom_properties.customProperties == { **EXISTING_PROPERTIES, **PROPERTIES_TO_ADD, } def test_simple_add_dataset_properties(mock_time): dataset_mce = make_dataset_with_properties() new_properties = {"new-simple-property": "new-value"} transformer = 
SimpleAddDatasetProperties.create( { "properties": new_properties, }, PipelineContext(run_id="test-simple-properties"), ) outputs = list( transformer.transform( [RecordEnvelope(input, metadata={}) for input in [dataset_mce]] ) ) assert len(outputs) == 1 custom_properties = builder.get_aspect_if_available( outputs[0].record, models.DatasetPropertiesClass ) print(str(custom_properties)) assert custom_properties is not None assert custom_properties.customProperties == { **EXISTING_PROPERTIES, **new_properties, } def test_simple_dataset_terms_transformation(mock_time): dataset_mce = make_generic_dataset() transformer = SimpleAddDatasetTerms.create( { "term_urns": [ builder.make_term_urn("Test"), builder.make_term_urn("Needs Review"), ] }, PipelineContext(run_id="test-terms"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, EndOfStream()] ] ) ) assert len(outputs) == 3 # Check that glossary terms were added. terms_aspect = outputs[1].record.aspect assert terms_aspect assert len(terms_aspect.terms) == 2 assert terms_aspect.terms[0].urn == builder.make_term_urn("Test") def test_pattern_dataset_terms_transformation(mock_time): dataset_mce = make_generic_dataset() transformer = PatternAddDatasetTerms.create( { "term_pattern": { "rules": { ".*example1.*": [ builder.make_term_urn("AccountBalance"), builder.make_term_urn("Email"), ], ".*example2.*": [builder.make_term_urn("Address")], } }, }, PipelineContext(run_id="test-terms"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, EndOfStream()] ] ) ) assert len(outputs) == 3 # Check that glossary terms were added. terms_aspect = outputs[1].record.aspect assert terms_aspect assert len(terms_aspect.terms) == 2 assert terms_aspect.terms[0].urn == builder.make_term_urn("AccountBalance") assert builder.make_term_urn("AccountBalance") not in terms_aspect.terms def test_mcp_add_tags_missing(mock_time): dataset_mcp = make_generic_dataset_mcp() transformer = SimpleAddDatasetTags.create( { "tag_urns": [ builder.make_tag_urn("NeedsDocumentation"), builder.make_tag_urn("Legacy"), ] }, PipelineContext(run_id="test-tags"), ) input_stream: List[RecordEnvelope] = [ RecordEnvelope(input, metadata={}) for input in [dataset_mcp] ] input_stream.append(RecordEnvelope(record=EndOfStream(), metadata={})) outputs = list(transformer.transform(input_stream)) assert len(outputs) == 3 assert outputs[0].record == dataset_mcp # Check that tags were added, this will be the second result tags_aspect = outputs[1].record.aspect assert tags_aspect assert len(tags_aspect.tags) == 2 assert tags_aspect.tags[0].tag == builder.make_tag_urn("NeedsDocumentation") assert isinstance(outputs[-1].record, EndOfStream) def test_mcp_add_tags_existing(mock_time): dataset_mcp = make_generic_dataset_mcp( aspect_name="globalTags", aspect=GlobalTagsClass( tags=[TagAssociationClass(tag=builder.make_tag_urn("Test"))] ), ) transformer = SimpleAddDatasetTags.create( { "tag_urns": [ builder.make_tag_urn("NeedsDocumentation"), builder.make_tag_urn("Legacy"), ] }, PipelineContext(run_id="test-tags"), ) input_stream: List[RecordEnvelope] = [ RecordEnvelope(input, metadata={}) for input in [dataset_mcp] ] input_stream.append(RecordEnvelope(record=EndOfStream(), metadata={})) outputs = list(transformer.transform(input_stream)) assert len(outputs) == 2 # Check that tags were added, this will be the second result tags_aspect = outputs[0].record.aspect assert tags_aspect assert len(tags_aspect.tags) == 3 
assert tags_aspect.tags[0].tag == builder.make_tag_urn("Test") assert tags_aspect.tags[1].tag == builder.make_tag_urn("NeedsDocumentation") assert isinstance(outputs[-1].record, EndOfStream) def test_mcp_multiple_transformers(mock_time, tmp_path): events_file = f"{tmp_path}/multi_transformer_test.json" pipeline = Pipeline.create( config_dict={ "source": { "type": "tests.unit.test_source.FakeSource", "config": {}, }, "transformers": [ { "type": "set_dataset_browse_path", "config": { "path_templates": ["/ENV/PLATFORM/EsComments/DATASET_PARTS"] }, }, { "type": "simple_add_dataset_tags", "config": {"tag_urns": ["urn:li:tag:EsComments"]}, }, ], "sink": {"type": "file", "config": {"filename": events_file}}, } ) pipeline.run() pipeline.raise_from_status() urn_pattern = "^" + re.escape( "urn:li:dataset:(urn:li:dataPlatform:elasticsearch,fooIndex,PROD)" ) assert ( tests.test_helpers.mce_helpers.assert_mcp_entity_urn( filter="ALL", entity_type="dataset", regex_pattern=urn_pattern, file=events_file, ) == 3 ) # check on status aspect assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="status", aspect_field_matcher={"removed": False}, file=events_file, ) == 1 ) # check on globalTags aspect assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="globalTags", aspect_field_matcher={"tags": [{"tag": "urn:li:tag:EsComments"}]}, file=events_file, ) == 1 ) # check on globalTags aspect assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="browsePaths", aspect_field_matcher={"paths": ["/prod/elasticsearch/EsComments/fooIndex"]}, file=events_file, ) == 1 ) def test_mcp_multiple_transformers_replace(mock_time, tmp_path): mcps: MutableSequence[ Union[MetadataChangeEventClass, MetadataChangeProposalWrapper] ] = [ MetadataChangeProposalWrapper( entityType="dataset", changeType=ChangeTypeClass.UPSERT, entityUrn=str( DatasetUrn.create_from_ids( platform_id="elasticsearch", table_name=f"fooBarIndex{i}", env="PROD", ) ), aspectName="globalTags", aspect=GlobalTagsClass(tags=[TagAssociationClass(tag="urn:li:tag:Test")]), ) for i in range(0, 10) ] mcps.extend( [ MetadataChangeProposalWrapper( entityType="dataset", changeType=ChangeTypeClass.UPSERT, entityUrn=str( DatasetUrn.create_from_ids( platform_id="elasticsearch", table_name=f"fooBarIndex{i}", env="PROD", ) ), aspectName="datasetProperties", aspect=DatasetPropertiesClass(description="test dataset"), ) for i in range(0, 10) ] ) # shuffle the mcps import random random.shuffle(mcps) events_file = create_and_run_test_pipeline( events=list(mcps), transformers=[ { "type": "set_dataset_browse_path", "config": { "path_templates": ["/ENV/PLATFORM/EsComments/DATASET_PARTS"] }, }, { "type": "simple_add_dataset_tags", "config": {"tag_urns": ["urn:li:tag:EsComments"]}, }, ], path=tmp_path, ) urn_pattern = "^" + re.escape( "urn:li:dataset:(urn:li:dataPlatform:elasticsearch,fooBarIndex" ) # there should be 30 MCP-s assert ( tests.test_helpers.mce_helpers.assert_mcp_entity_urn( filter="ALL", entity_type="dataset", regex_pattern=urn_pattern, file=events_file, ) == 30 ) # 10 globalTags aspects with new tag attached assert ( tests.test_helpers.mce_helpers.assert_for_each_entity( entity_type="dataset", aspect_name="globalTags", aspect_field_matcher={ "tags": [{"tag": "urn:li:tag:Test"}, {"tag": "urn:li:tag:EsComments"}] }, file=events_file, ) == 10 ) # check on browsePaths aspect for i in range(0, 10): 
tests.test_helpers.mce_helpers.assert_entity_mcp_aspect( entity_urn=str( DatasetUrn.create_from_ids( platform_id="elasticsearch", table_name=f"fooBarIndex{i}", env="PROD", ) ), aspect_name="browsePaths", aspect_field_matcher={ "paths": [f"/prod/elasticsearch/EsComments/fooBarIndex{i}"] }, file=events_file, ) == 1 class SuppressingTransformer(BaseTransformer, SingleAspectTransformer): @classmethod def create( cls, config_dict: dict, ctx: PipelineContext ) -> "SuppressingTransformer": return SuppressingTransformer() def entity_types(self) -> List[str]: return super().entity_types() def aspect_name(self) -> str: return "datasetProperties" def transform_aspect( self, entity_urn: str, aspect_name: str, aspect: Optional[builder.Aspect] ) -> Optional[builder.Aspect]: return None def test_supression_works(): dataset_mce = make_generic_dataset() dataset_mcp = make_generic_dataset_mcp( aspect_name="datasetProperties", aspect=DatasetPropertiesClass(description="supressable description"), ) transformer = SuppressingTransformer.create( {}, PipelineContext(run_id="test-suppress-transformer"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, dataset_mcp, EndOfStream()] ] ) ) assert len(outputs) == 2 # MCP will be dropped class OldMCETransformer(DatasetTransformer): """A simulated legacy MCE transformer""" @classmethod def create(cls, config_dict: dict, ctx: PipelineContext) -> "OldMCETransformer": return OldMCETransformer() def transform_one(self, mce: MetadataChangeEventClass) -> MetadataChangeEventClass: # legacy transformers should not receive metadata change proposal events assert not isinstance(mce, MetadataChangeProposalWrapper) if isinstance(mce, MetadataChangeEventClass): assert isinstance(mce.proposedSnapshot, DatasetSnapshotClass) mce.proposedSnapshot.aspects.append( DatasetPropertiesClass(description="Old Transformer was here") ) return mce def test_old_transformers_working_as_before(mock_time): dataset_mce = make_generic_dataset() dataset_mcp = make_generic_dataset_mcp() transformer = OldMCETransformer.create( {}, PipelineContext(run_id="test-old-transformer"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, dataset_mcp, EndOfStream()] ] ) ) assert len(outputs) == 3 # MCP will come back untouched assert outputs[0].record == dataset_mce # Check that glossary terms were added. 
props_aspect = builder.get_aspect_if_available( outputs[0].record, DatasetPropertiesClass ) assert props_aspect assert props_aspect.description == "Old Transformer was here" assert outputs[1].record == dataset_mcp assert isinstance(outputs[-1].record, EndOfStream) # MCP only stream dataset_mcps = [ make_generic_dataset_mcp(), make_generic_dataset_mcp( aspect=DatasetPropertiesClass(description="Another test MCP") ), EndOfStream(), ] transformer = OldMCETransformer.create( {}, PipelineContext(run_id="test-old-transformer"), ) outputs = list( transformer.transform( [RecordEnvelope(input, metadata={}) for input in dataset_mcps] ) ) assert len(outputs) == 3 # MCP-s will come back untouched assert outputs[0].record == dataset_mcps[0] assert outputs[1].record == dataset_mcps[1] assert isinstance(outputs[-1].record, EndOfStream) def test_pattern_dataset_schema_terms_transformation(mock_time): dataset_mce = make_generic_dataset( aspects=[ models.SchemaMetadataClass( schemaName="customer", # not used platform=builder.make_data_platform_urn( "hive" ), # important <- platform must be an urn version=0, # when the source system has a notion of versioning of schemas, insert this in, otherwise leave as 0 hash="", # when the source system has a notion of unique schemas identified via hash, include a hash, else leave it as empty string platformSchema=models.OtherSchemaClass( rawSchema="__insert raw schema here__" ), fields=[ models.SchemaFieldClass( fieldPath="address", type=models.SchemaFieldDataTypeClass( type=models.StringTypeClass() ), nativeDataType="VARCHAR(100)", # use this to provide the type of the field in the source system's vernacular ), models.SchemaFieldClass( fieldPath="first_name", type=models.SchemaFieldDataTypeClass( type=models.StringTypeClass() ), nativeDataType="VARCHAR(100)", # use this to provide the type of the field in the source system's vernacular ), models.SchemaFieldClass( fieldPath="last_name", type=models.SchemaFieldDataTypeClass( type=models.StringTypeClass() ), nativeDataType="VARCHAR(100)", # use this to provide the type of the field in the source system's vernacular ), ], ) ] ) transformer = PatternAddDatasetSchemaTerms.create( { "term_pattern": { "rules": { ".*first_name.*": [ builder.make_term_urn("Name"), builder.make_term_urn("FirstName"), ], ".*last_name.*": [ builder.make_term_urn("Name"), builder.make_term_urn("LastName"), ], } }, }, PipelineContext(run_id="test-schema-terms"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, EndOfStream()] ] ) ) assert len(outputs) == 2 # Check that glossary terms were added. 
schema_aspect = outputs[0].record.proposedSnapshot.aspects[0] assert schema_aspect assert schema_aspect.fields[0].fieldPath == "address" assert schema_aspect.fields[0].glossaryTerms is None assert schema_aspect.fields[1].fieldPath == "first_name" assert schema_aspect.fields[1].glossaryTerms.terms[0].urn == builder.make_term_urn( "Name" ) assert schema_aspect.fields[1].glossaryTerms.terms[1].urn == builder.make_term_urn( "FirstName" ) assert schema_aspect.fields[2].fieldPath == "last_name" assert schema_aspect.fields[2].glossaryTerms.terms[0].urn == builder.make_term_urn( "Name" ) assert schema_aspect.fields[2].glossaryTerms.terms[1].urn == builder.make_term_urn( "LastName" ) def test_pattern_dataset_schema_tags_transformation(mock_time): dataset_mce = make_generic_dataset( aspects=[ models.SchemaMetadataClass( schemaName="customer", # not used platform=builder.make_data_platform_urn( "hive" ), # important <- platform must be an urn version=0, # when the source system has a notion of versioning of schemas, insert this in, otherwise leave as 0 hash="", # when the source system has a notion of unique schemas identified via hash, include a hash, else leave it as empty string platformSchema=models.OtherSchemaClass( rawSchema="__insert raw schema here__" ), fields=[ models.SchemaFieldClass( fieldPath="address", type=models.SchemaFieldDataTypeClass( type=models.StringTypeClass() ), nativeDataType="VARCHAR(100)", # use this to provide the type of the field in the source system's vernacular ), models.SchemaFieldClass( fieldPath="first_name", type=models.SchemaFieldDataTypeClass( type=models.StringTypeClass() ), nativeDataType="VARCHAR(100)", # use this to provide the type of the field in the source system's vernacular ), models.SchemaFieldClass( fieldPath="last_name", type=models.SchemaFieldDataTypeClass( type=models.StringTypeClass() ), nativeDataType="VARCHAR(100)", # use this to provide the type of the field in the source system's vernacular ), ], ) ] ) transformer = PatternAddDatasetSchemaTags.create( { "tag_pattern": { "rules": { ".*first_name.*": [ builder.make_tag_urn("Name"), builder.make_tag_urn("FirstName"), ], ".*last_name.*": [ builder.make_tag_urn("Name"), builder.make_tag_urn("LastName"), ], } }, }, PipelineContext(run_id="test-schema-tags"), ) outputs = list( transformer.transform( [ RecordEnvelope(input, metadata={}) for input in [dataset_mce, EndOfStream()] ] ) ) assert len(outputs) == 2 # Check that glossary terms were added. schema_aspect = outputs[0].record.proposedSnapshot.aspects[0] assert schema_aspect assert schema_aspect.fields[0].fieldPath == "address" assert schema_aspect.fields[0].globalTags is None assert schema_aspect.fields[1].fieldPath == "first_name" assert schema_aspect.fields[1].globalTags.tags[0].tag == builder.make_tag_urn( "Name" ) assert schema_aspect.fields[1].globalTags.tags[1].tag == builder.make_tag_urn( "FirstName" ) assert schema_aspect.fields[2].fieldPath == "last_name" assert schema_aspect.fields[2].globalTags.tags[0].tag == builder.make_tag_urn( "Name" ) assert schema_aspect.fields[2].globalTags.tags[1].tag == builder.make_tag_urn( "LastName" )
//////////////////////////////////////////////////////////////////////////////////////////////////// // This file is part of CosmoScout VR // // and may be used under the terms of the MIT license. See the LICENSE file for details. // // Copyright: (c) 2019 German Aerospace Center (DLR) // //////////////////////////////////////////////////////////////////////////////////////////////////// #include "ResourceRequestHandler.hpp" #include "../logger.hpp" #include <fstream> #include <include/wrapper/cef_stream_resource_handler.h> namespace cs::gui::detail { //////////////////////////////////////////////////////////////////////////////////////////////////// CefRefPtr<CefResourceHandler> ResourceRequestHandler::GetResourceHandler( CefRefPtr<CefBrowser> /*browser*/, CefRefPtr<CefFrame> /*frame*/, CefRefPtr<CefRequest> request) { std::string url(request->GetURL().ToString()); size_t pathStartIndex = 0; // We handle requests for local files. if (url.find("file://") == 0) { pathStartIndex = 7; } // Here we skip anything marked with { ... } at the beginning of a file URL. This is explained in // the documentation of WebView::setZoomLevel in great detail. The curly braces are %7B and %7D in // encoded URLs. if (url.find("file://%7B") == 0) { pathStartIndex = url.find("%7D") + 3; } if (pathStartIndex > 0) { std::string path(url.substr(pathStartIndex)); std::string ext(url.substr(url.find_last_of('.'))); std::ifstream input(path, std::ios::binary); if (!input) { logger().error("Failed to open gui resource: Cannot open file '{}'!", path); return nullptr; } std::vector<char> buffer( (std::istreambuf_iterator<char>(input)), (std::istreambuf_iterator<char>())); CefRefPtr<CefStreamReader> stream = CefStreamReader::CreateForData(static_cast<void*>(buffer.data()), buffer.size()); std::string mime("text/html"); if (ext == ".png") { mime = "image/png"; } else if (ext == ".jpg" || ext == ".jpeg") { mime = "image/jpg"; } else if (ext == ".js") { mime = "text/javascript"; } else if (ext == ".csv") { mime = "text/csv"; } else if (ext == ".css") { mime = "text/css"; } else if (ext == ".ttf") { mime = "application/x-font-ttf"; } else if (ext == ".woff" || ext == ".woff2") { mime = "application/x-font-woff"; } else if (ext != ".html") { logger().warn("Opening file with unknown extension '{}'!", ext); } return new CefStreamResourceHandler(mime, stream); } return nullptr; } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cs::gui::detail
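A minimal, self-contained sketch (not part of CosmoScout VR) of the two steps the handler above performs before opening a file: skipping the "file://" prefix, including the optional %7B...%7D zoom-level marker, and mapping the file extension to a MIME type. The example URL and the table-driven guessMime helper are illustrative assumptions; the real handler uses the if/else chain shown above and wraps the result in a CefStreamResourceHandler.

#include <iostream>
#include <map>
#include <string>

// Table-driven version of the extension-to-MIME mapping above; unknown
// extensions fall back to "text/html", matching the handler's default.
static std::string guessMime(std::string const& ext) {
  static const std::map<std::string, std::string> table = {
      {".png", "image/png"},       {".jpg", "image/jpg"},
      {".jpeg", "image/jpg"},      {".js", "text/javascript"},
      {".csv", "text/csv"},        {".css", "text/css"},
      {".ttf", "application/x-font-ttf"},
      {".woff", "application/x-font-woff"},
      {".woff2", "application/x-font-woff"}};
  auto it = table.find(ext);
  return it != table.end() ? it->second : "text/html";
}

int main() {
  // Hypothetical URL carrying an encoded "{ ... }" zoom marker.
  std::string url("file://%7B1.2%7D/gui/css/main.css");

  std::size_t pathStartIndex = 0;
  if (url.rfind("file://%7B", 0) == 0) {
    pathStartIndex = url.find("%7D") + 3;  // skip the encoded { ... } marker
  } else if (url.rfind("file://", 0) == 0) {
    pathStartIndex = 7;                    // plain file:// URL
  }

  std::string path = url.substr(pathStartIndex);
  std::string ext  = url.substr(url.find_last_of('.'));

  std::cout << path << " -> " << guessMime(ext) << "\n";  // /gui/css/main.css -> text/css
  return 0;
}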
<filename>Sources/App/Classes/Headers/TVCAppearance.h<gh_stars>100-1000 /* ********************************************************************* * _____ _ _ * |_ _|____ _| |_ _ _ __ _| | * | |/ _ \ \/ / __| | | |/ _` | | * | | __/> <| |_| |_| | (_| | | * |_|\___/_/\_\\__|\__,_|\__,_|_| * * Copyright (c) 2018 Codeux Software, LLC & respective contributors. * Please see Acknowledgements.pdf for additional information. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Textual, "Codeux Software, LLC", nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * *********************************************************************** */ #import "TXAppearance.h" NS_ASSUME_NONNULL_BEGIN @interface TVCAppearance : NSObject /* Top level group */ /* Nonnull until -flushAppearanceProperties is called. */ @property (readonly, copy, nullable) NSDictionary<NSString *, id> *appearanceProperties; /* Properties */ @property (readonly) BOOL isHighResolutionAppearance; /* Stateless Accessors */ - (nullable NSColor *)colorForKey:(NSString *)key; - (nullable NSColor *)colorInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key; - (nullable NSGradient *)gradientForKey:(NSString *)key; - (nullable NSGradient *)gradientInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key; - (nullable NSFont *)fontForKey:(NSString *)key; - (nullable NSFont *)fontInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key; - (nullable NSImage *)imageForKey:(NSString *)key; - (nullable NSImage *)imageInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key; - (NSSize)sizeForKey:(NSString *)key; - (NSSize)sizeInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key; - (CGFloat)measurementForKey:(NSString *)key; - (CGFloat)measurementInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key; /* Stateful Accessors */ /* Stateful appearance properties require the properties to have a "activeWindow" and "inactiveWindow" dictionary value which contains the value of the property itself. 
*/ /* Example: <key>exampleStatefulColor</key> <dict> <key>activeWindow</key> <dict> <key>type</key> <integer>1</integer> <key>value</key> <string>0.0 0.3</string> </dict> <key>inactiveWindow</key> <dict> <key>type</key> <integer>1</integer> <key>value</key> <string>1.0</string> </dict> </dict> */ - (nullable NSColor *)colorForKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSColor *)colorInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSGradient *)gradientForKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSGradient *)gradientInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSFont *)fontForKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSFont *)fontInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSImage *)imageForKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; - (nullable NSImage *)imageInGroup:(NSDictionary<NSString *, id> *)group withKey:(NSString *)key forActiveWindow:(BOOL)forActiveWindow; @end /* TVCApplicationAppearance keeps a copy of TXAppearance proeprties. The properties it keeps are those that were set when the object was created. To update to the latest properties, create a new instance of the object. */ @interface TVCApplicationAppearance : TVCAppearance <TXAppearanceProperties> @end NS_ASSUME_NONNULL_END
// sdk/synapse/azure-resourcemanager-synapse/src/samples/java/com/azure/resourcemanager/synapse/generated/WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesGetSamples.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.

package com.azure.resourcemanager.synapse.generated;

import com.azure.core.util.Context;
import com.azure.resourcemanager.synapse.models.BlobAuditingPolicyName;

/** Samples for WorkspaceManagedSqlServerExtendedBlobAuditingPolicies Get. */
public final class WorkspaceManagedSqlServerExtendedBlobAuditingPoliciesGetSamples {
    /*
     * x-ms-original-file: specification/synapse/resource-manager/Microsoft.Synapse/stable/2021-06-01/examples/GetWorkspaceManagedSqlExtendedServerBlobAuditingSettings.json
     */
    /**
     * Sample code: Get workspace managed sql servers' extended blob auditing settings.
     *
     * @param manager Entry point to SynapseManager.
     */
    public static void getWorkspaceManagedSqlServersExtendedBlobAuditingSettings(
        com.azure.resourcemanager.synapse.SynapseManager manager) {
        manager
            .workspaceManagedSqlServerExtendedBlobAuditingPolicies()
            .getWithResponse("wsg-7398", "testWorkspace", BlobAuditingPolicyName.DEFAULT, Context.NONE);
    }
}
{"nom":"Valdrôme","circ":"3ème circonscription","dpt":"Drôme","inscrits":132,"abs":62,"votants":70,"blancs":16,"nuls":1,"exp":53,"res":[{"nuance":"REM","nom":"<NAME>","voix":40},{"nuance":"LR","nom":"<NAME>","voix":13}]}
<filename>lib/os/os_string.hpp<gh_stars>1000+ /************************************************************************** * * Copyright 2011 <NAME> * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * **************************************************************************/ /* * String manipulation. */ #pragma once #include <assert.h> #include <stdarg.h> #include <stdio.h> #include <stddef.h> #ifdef __MINGW32__ // Some versions of MinGW are missing _vscprintf's declaration, although they // still provide the symbol in the import library. extern "C" _CRTIMP int _vscprintf(const char *format, va_list argptr); #endif #ifndef va_copy #ifdef __va_copy #define va_copy(dest, src) __va_copy((dest), (src)) #else #define va_copy(dest, src) (dest) = (src) #endif #endif #include <vector> #include "os.hpp" #ifdef _WIN32 #define OS_DIR_SEP '\\' #define OS_PATH_SEP ';' #else /* !_WIN32 */ #define OS_DIR_SEP '/' #define OS_PATH_SEP ':' #endif /* !_WIN32 */ namespace os { /** * Class to represent zero-terminated strings, based upon std::vector<char>, * suitable for passing strings or paths to/from OS calls. * * Both Win32 and POSIX APIs return strings as zero length buffers. Although * std::string provides an easy method to obtain a read-only pointer to a zero * terminated string, it lacks the ability to return a read-write pointer. So * there is no way to tell OS calls to write into a std::string directly -- a * temporary malloc'ed string would be necessary --, which would be * unnecessarily inefficient, specially considering that these strings would * ultimately passed back to the OS, which would again expect zero-terminated * strings. * * This class is not, however, a full replacement for std::string, which should * be otherwise used whenever possible. */ class String { protected: typedef std::vector<char> Buffer; /** * The buffer's last element is always the '\0' character, therefore the * buffer must never be empty. 
*/ Buffer buffer; Buffer::iterator find(char c) { Buffer::iterator it = buffer.begin(); assert(it != buffer.end()); while (it != buffer.end()) { if (*it == c) { return it; } ++it; } return buffer.end(); } Buffer::iterator rfind(char c) { Buffer::iterator it = buffer.end(); // Skip trailing '\0' assert(it != buffer.begin()); --it; assert(*it == '\0'); while (it != buffer.begin()) { --it; if (*it == c) { return it; } } return buffer.end(); } String(size_t size) : buffer(size) { } char *buf(void) { return &buffer[0]; } inline bool isSep(char c) { if (c == '/') { return true; } #ifdef _WIN32 if (c == '\\') { return true; } #endif return false; } public: Buffer::iterator rfindSep(bool skipTrailing = true) { Buffer::iterator it = end(); // Skip trailing separators if (skipTrailing) { while (it != buffer.begin()) { --it; if (isSep(*it)) { // Halt if find the root if (it == buffer.begin()) { return it; } } else { break; } } } // Advance to the last separator while (it != buffer.begin()) { --it; if (isSep(*it)) { return it; } } return end(); } /* * Constructors */ String(void) { buffer.push_back(0); } String(const char *s) : buffer(s, s + strlen(s) + 1) {} String(const String &other) : buffer(other.buffer) {} template <class InputIterator> String(InputIterator first, InputIterator last) : buffer(first, last) { buffer.push_back(0); } /** * From a printf-like format string */ static String format(const char *format, ...) #if defined(__MINGW32__) __attribute__ ((format (__MINGW_PRINTF_FORMAT, 1, 2))) #elif defined(__GNUC__) __attribute__ ((format (printf, 1, 2))) #endif { va_list args; va_start(args, format); int length; va_list args_copy; va_copy(args_copy, args); #ifdef _WIN32 /* We need to use _vscprintf to calculate the length as vsnprintf returns -1 * if the number of characters to write is greater than count. */ length = _vscprintf(format, args_copy); #else char dummy; length = vsnprintf(&dummy, sizeof dummy, format, args_copy); #endif va_end(args_copy); assert(length >= 0); size_t size = size_t(length) + 1; String path(size); va_start(args, format); vsnprintf(path.buf(), size, format, args); va_end(args); return path; } /* * Conversion to ordinary C strings. 
*/ const char *str(void) const { assert(buffer.back() == 0); return &buffer[0]; } operator const char *(void) const { return str(); } /* * Iterators */ typedef Buffer::const_iterator const_iterator; typedef Buffer::iterator iterator; const_iterator begin(void) const { return buffer.begin(); } iterator begin(void) { return buffer.begin(); } const_iterator end(void) const { const_iterator it = buffer.end(); assert(it != buffer.begin()); --it; // skip null return it; } iterator end(void) { iterator it = buffer.end(); assert(it != buffer.begin()); --it; // skip null return it; } /* * Operations */ void insert(iterator position, char c) { buffer.insert(position, c); } template <class InputIterator> void insert(iterator position, InputIterator first, InputIterator last) { buffer.insert(position, first, last); } void insert(iterator position, const char *s) { assert(s); insert(position, s, s + strlen(s)); } void insert(iterator position, const String & other) { insert(position, other.begin(), other.end()); } void append(char c) { insert(end(), c); } template <class InputIterator> void append(InputIterator first, InputIterator last) { insert(end(), first, last); } void append(const char *s) { insert(end(), s); } void append(const String & other) { insert(end(), other); } template <class InputIterator> void erase(InputIterator first, InputIterator last) { buffer.erase(first, last); } /** * Get a writable buffer with the specified size. * * truncate() must be called after the buffer is written, and before any other * method is called. * * Between the call to buf() and truncate() methods, the `buffer.back() == * 0` invariant will not hold true. */ char *buf(size_t size) { buffer.resize(size); return &buffer[0]; } size_t length(void) const { size_t size = buffer.size(); assert(size > 0); assert(buffer[size - 1] == 0); return size - 1; } /** * Truncate the string to the specified length. */ void truncate(size_t length) { assert(length < buffer.size()); buffer[length] = 0; assert(strlen(&buffer[0]) == length); buffer.resize(length + 1); } /** * Truncate the string to the first zero character. */ void truncate(void) { truncate(strlen(&buffer[0])); } /* * Path manipulation */ bool exists(void) const; /* Trim directory (leaving base filename). */ void trimDirectory(void) { iterator sep = rfindSep(); if (sep != end()) { buffer.erase(buffer.begin(), sep + 1); } } /* Trim filename component (leaving containing directory). * * - trailing separators are ignored * - a path with no separator at all yields "." * - a path consisting of just the root directory is left unchanged */ void trimFilename(void) { iterator sep = rfindSep(); // No separator found, so return '.' if (sep == end()) { buffer.resize(2); buffer[0] = '.'; buffer[1] = 0; return; } // Root. Nothing to do. if (sep == buffer.begin()) { return; } // Trim filename buffer.erase(sep, end()); } void trimExtension(void) { iterator dot = rfind('.'); if (dot != buffer.end()) { buffer.erase(dot, end()); } } void join(const String & other) { if (length() && end()[-1] != OS_DIR_SEP) { append(OS_DIR_SEP); } append(other.begin(), other.end()); } }; String getProcessName(); String getCurrentDir(); String getConfigDir(); bool createDirectory(const String &path); bool copyFile(const String &srcFileName, const String &dstFileName, bool override = true); bool removeFile(const String &fileName); String getTemporaryDirectoryPath(void); } /* namespace os */
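A short usage sketch for the os::String class declared above, illustrating the path helpers it defines inline (format, join, trimDirectory, trimFilename, trimExtension). It assumes the header is reachable as "os_string.hpp" (together with the os.hpp it includes) and a POSIX directory separator; the paths and expected values in the comments follow from that assumption. Illustrative only, not part of the library above.

#include <iostream>
#include "os_string.hpp"  // assumed include path; adjust to where the header lives

int main() {
    // printf-style construction.
    os::String file = os::String::format("trace_%04d.json", 42);  // "trace_0042.json"

    // join() inserts OS_DIR_SEP only when the left side does not already end with one.
    os::String path("/tmp/captures");
    path.join(file);                 // "/tmp/captures/trace_0042.json" on POSIX

    // Split the path back into its parts; each helper mutates in place, so work on copies.
    os::String base = path;
    base.trimDirectory();            // "trace_0042.json"
    base.trimExtension();            // "trace_0042"

    os::String dir = path;
    dir.trimFilename();              // "/tmp/captures"

    // Implicit conversion to const char* makes the result easy to hand to OS calls.
    std::cout << path.str() << " -> " << base.str() << " in " << dir.str() << "\n";
    return 0;
}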
// civitaspo/digdag
package io.digdag.client.api;

import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.immutables.value.Value;

@Value.Immutable
@JsonDeserialize(as = ImmutableRestVersionCheckResult.class)
public interface RestVersionCheckResult
{
    String getServerVersion();

    boolean getUpgradeRecommended();

    boolean getApiCompatible();

    static ImmutableRestVersionCheckResult.Builder builder()
    {
        return ImmutableRestVersionCheckResult.builder();
    }
}