Dataset preview column summary:
max_stars_count (int64): 301 to 224k
text (string): lengths 6 to 1.05M characters
token_count (int64): 3 to 727k
696
<reponame>hwang-pku/Strata<filename>modules/product/src/test/java/com/opengamma/strata/product/common/BuySellTest.java
/*
 * Copyright (C) 2015 - present by OpenGamma Inc. and the OpenGamma group of companies
 *
 * Please see distribution for license.
 */
package com.opengamma.strata.product.common;

import static com.opengamma.strata.basics.currency.Currency.GBP;
import static com.opengamma.strata.collect.TestHelper.assertJodaConvert;
import static com.opengamma.strata.collect.TestHelper.assertSerialization;
import static com.opengamma.strata.collect.TestHelper.coverEnum;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException;
import static org.assertj.core.data.Offset.offset;

import java.util.Locale;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import com.opengamma.strata.basics.currency.CurrencyAmount;

/**
 * Test {@link BuySell}.
 */
public class BuySellTest {

  //-------------------------------------------------------------------------
  @Test
  public void test_ofBuy() {
    assertThat(BuySell.ofBuy(true)).isEqualTo(BuySell.BUY);
    assertThat(BuySell.ofBuy(false)).isEqualTo(BuySell.SELL);
  }

  //-------------------------------------------------------------------------
  @Test
  public void test_normalize_sell_double() {
    assertThat(BuySell.SELL.normalize(1d)).isCloseTo(-1d, offset(0d));
    assertThat(BuySell.SELL.normalize(0d)).isCloseTo(0d, offset(0d));
    assertThat(BuySell.SELL.normalize(-0d)).isCloseTo(0d, offset(0d));
    assertThat(BuySell.SELL.normalize(-1d)).isCloseTo(-1d, offset(0d));
  }

  @Test
  public void test_normalize_sell_amount() {
    assertThat(BuySell.SELL.normalize(CurrencyAmount.of(GBP, 1d))).isEqualTo(CurrencyAmount.of(GBP, -1d));
    assertThat(BuySell.SELL.normalize(CurrencyAmount.of(GBP, 0d))).isEqualTo(CurrencyAmount.of(GBP, 0d));
    assertThat(BuySell.SELL.normalize(CurrencyAmount.of(GBP, -1d))).isEqualTo(CurrencyAmount.of(GBP, -1d));
  }

  @Test
  public void test_normalize_buy_double() {
    assertThat(BuySell.BUY.normalize(1d)).isCloseTo(1d, offset(0d));
    assertThat(BuySell.BUY.normalize(0d)).isCloseTo(0d, offset(0d));
    assertThat(BuySell.BUY.normalize(-0d)).isCloseTo(0d, offset(0d));
    assertThat(BuySell.BUY.normalize(-1d)).isCloseTo(1d, offset(0d));
  }

  @Test
  public void test_normalize_buy_amount() {
    assertThat(BuySell.BUY.normalize(CurrencyAmount.of(GBP, 1d))).isEqualTo(CurrencyAmount.of(GBP, 1d));
    assertThat(BuySell.BUY.normalize(CurrencyAmount.of(GBP, 0d))).isEqualTo(CurrencyAmount.of(GBP, 0d));
    assertThat(BuySell.BUY.normalize(CurrencyAmount.of(GBP, -1d))).isEqualTo(CurrencyAmount.of(GBP, 1d));
  }

  @Test
  public void test_isBuy() {
    assertThat(BuySell.BUY.isBuy()).isTrue();
    assertThat(BuySell.SELL.isBuy()).isFalse();
  }

  @Test
  public void test_isSell() {
    assertThat(BuySell.BUY.isSell()).isFalse();
    assertThat(BuySell.SELL.isSell()).isTrue();
  }

  @Test
  public void test_opposite() {
    assertThat(BuySell.BUY.opposite()).isEqualTo(BuySell.SELL);
    assertThat(BuySell.SELL.opposite()).isEqualTo(BuySell.BUY);
  }

  //-------------------------------------------------------------------------
  public static Object[][] data_name() {
    return new Object[][] {
        {BuySell.BUY, "Buy"},
        {BuySell.SELL, "Sell"},
    };
  }

  @ParameterizedTest
  @MethodSource("data_name")
  public void test_toString(BuySell convention, String name) {
    assertThat(convention.toString()).isEqualTo(name);
  }

  @ParameterizedTest
  @MethodSource("data_name")
  public void test_of_lookup(BuySell convention, String name) {
    assertThat(BuySell.of(name)).isEqualTo(convention);
  }

  @ParameterizedTest
  @MethodSource("data_name")
  public void test_of_lookupUpperCase(BuySell convention, String name) {
    assertThat(BuySell.of(name.toUpperCase(Locale.ENGLISH))).isEqualTo(convention);
  }

  @ParameterizedTest
  @MethodSource("data_name")
  public void test_of_lookupLowerCase(BuySell convention, String name) {
    assertThat(BuySell.of(name.toLowerCase(Locale.ENGLISH))).isEqualTo(convention);
  }

  @Test
  public void test_of_lookup_notFound() {
    assertThatIllegalArgumentException()
        .isThrownBy(() -> BuySell.of("Rubbish"));
  }

  @Test
  public void test_of_lookup_null() {
    assertThatIllegalArgumentException()
        .isThrownBy(() -> BuySell.of(null));
  }

  //-------------------------------------------------------------------------
  @Test
  public void coverage() {
    coverEnum(BuySell.class);
  }

  @Test
  public void test_serialization() {
    assertSerialization(BuySell.BUY);
  }

  @Test
  public void test_jodaConvert() {
    assertJodaConvert(BuySell.class, BuySell.BUY);
  }

}
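The behaviour these tests pin down is simple sign normalization: Buy keeps magnitudes positive, Sell flips them negative, and the opposite/lookup helpers are symmetric. A minimal Python sketch of that contract (for illustration only, not the Strata implementation):

```python
# Illustrative sketch mirroring the behaviour asserted in BuySellTest.
from enum import Enum

class BuySell(Enum):
    BUY = 1
    SELL = -1

    @staticmethod
    def of_buy(is_buy: bool) -> "BuySell":
        return BuySell.BUY if is_buy else BuySell.SELL

    def normalize(self, amount: float) -> float:
        # Buy keeps the magnitude positive, Sell makes it negative; zero stays zero.
        return self.value * abs(amount)

    def opposite(self) -> "BuySell":
        return BuySell.SELL if self is BuySell.BUY else BuySell.BUY

assert BuySell.SELL.normalize(1.0) == -1.0
assert BuySell.BUY.normalize(-1.0) == 1.0
assert BuySell.of_buy(False) is BuySell.SELL
```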
1,840
423
<gh_stars>100-1000
#include <functorch/csrc/Interpreter.h>
#include <functorch/csrc/BatchedTensorImpl.h>
#include <functorch/csrc/TensorWrapper.h>
#include <functorch/csrc/VmapInterpreter.h>
#include <functorch/csrc/FunctionalizeInterpreter.h>
#include <functorch/csrc/ADInterpreters.h>

namespace at { namespace functorch {

static DispatchKeySet get_all_dynlayer_keyset() {
  // NB: FULL_AFTER does not include the dispatch key
  // "all dispatch keys between DynamicLayer{Front, Back}Mode, inclusive"
  auto result =
    DispatchKeySet(DispatchKeySet::FULL_AFTER, kDynamicLayerFrontModeKey) -
    DispatchKeySet(DispatchKeySet::FULL_AFTER, kDynamicLayerBackModeKey);
  result = result | DispatchKeySet({kDynamicLayerFrontModeKey});

  // Hack: don't handle the autocast dispatch keys. Their interaction with functorch
  // is weird.
  result = result - autocast_dispatch_keyset;

  // Hack: don't handle kVmapModeKey. We need a better way of modeling this.
  // In e.g. grad(vmap(f)), kVmapModeKey makes it so that all random operations,
  // even after we are done handling the vmap layer, error out.
  result = result.remove(kVmapModeKey);

  return result;
}

// TODO: This should be constexpr, but there are some methods
// of DispatchKeySet that haven't been marked constexpr yet.
static DispatchKeySet all_dynlayer_keyset = get_all_dynlayer_keyset();

static DispatchKeySet keysForEnteringDynamicLayer(TransformType key) {
  if (key == TransformType::Vmap) {
    // NB: Does not include kVmapModeKey. We may modulate the key when
    // constructing the DynamicLayer, but we don't control it when entering/exiting
    // the DynamicLayer.
    return DispatchKeySet({kBatchedKey});
  } else if (key == TransformType::Grad || key == TransformType::Jvp) {
    return autograd_dispatch_keyset.add(DispatchKey::ADInplaceOrView);
  } else if (key == TransformType::Functionalize) {
    return DispatchKeySet(DispatchKey::Functionalize);
  } else {
    TORCH_INTERNAL_ASSERT(false, "Unsupported key: ", key);
  }
}

DispatchKeySet keysToExcludeWhenEnteringDynamicLayer(TransformType key) {
  DispatchKeySet exclude = all_dynlayer_keyset;
  exclude = exclude.remove(kDynamicLayerBackModeKey);
  exclude = exclude - keysForEnteringDynamicLayer(key);
  return exclude;
}

void setup_dispatch_key_tls(DispatchKeySet exclude, DispatchKeySet include) {
  auto local_keyset = c10::impl::tls_local_dispatch_key_set();
  local_keyset.excluded_ = local_keyset.excluded_ | exclude;
  local_keyset.included_ = local_keyset.included_ | include;
  c10::impl::_force_tls_local_dispatch_key_set(local_keyset);
}

std::ostream& operator<<(std::ostream& os, const TransformType& t) {
  switch (t) {
    case TransformType::Torch:
      os << "Torch";
      break;
    case TransformType::Vmap:
      os << "Vmap";
      break;
    case TransformType::Grad:
      os << "Grad";
      break;
    case TransformType::Jvp:
      os << "Jvp";
      break;
    case TransformType::Functionalize:
      os << "Functionalize";
      break;
    default:
      TORCH_INTERNAL_ASSERT(false);
  }
  return os;
}

void sanityCheckStack(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  auto num_args = op.schema().arguments().size();
  foreachTensorInplace(*stack, stack->size() - num_args, stack->size(),
      [](const Tensor& tensor) {
        auto* wrapper = maybeGetTensorWrapper(tensor);
        TORCH_INTERNAL_ASSERT(wrapper == nullptr);
        auto* batched = maybeGetBatchedImpl(tensor);
        TORCH_INTERNAL_ASSERT(batched == nullptr);
        return tensor;
      });
}

#define INTERPRETER_DISPATCH(type, method) \
  switch (key()) { \
    case TransformType::Vmap: \
      return VmapInterpreterPtr(this). method; \
    case TransformType::Grad: \
      return GradInterpreterPtr(this). method; \
    case TransformType::Jvp: \
      return JvpInterpreterPtr(this). method; \
    case TransformType::Functionalize: \
      return FunctionalizeInterpreterPtr(this). method; \
    default: \
      TORCH_INTERNAL_ASSERT(false, "Unrecognized transform"); \
  }

void Interpreter::process(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  INTERPRETER_DISPATCH(key_, SINGLE_ARG(processImpl(op, stack)));
}

void Interpreter::sendToNextInterpreter(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
  INTERPRETER_DISPATCH(key_, SINGLE_ARG(sendToNextInterpreterImpl(op, stack)));
}

}}
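The include/exclude bookkeeping above is just set arithmetic over dispatch keys: entering a transform excludes every dynamic-layer key except the back-mode key and the keys that transform itself needs. A functorch-free Python sketch of that idea, with made-up key names standing in for the real c10 dispatch keys:

```python
# Toy model of keysToExcludeWhenEnteringDynamicLayer -- names are illustrative only.
ALL_DYNLAYER_KEYS = {"DynamicLayerFrontMode", "DynamicLayerBackMode",
                     "Batched", "Autograd", "ADInplaceOrView", "Functionalize"}

KEYS_FOR_ENTERING = {
    "Vmap": {"Batched"},
    "Grad": {"Autograd", "ADInplaceOrView"},
    "Jvp": {"Autograd", "ADInplaceOrView"},
    "Functionalize": {"Functionalize"},
}

def keys_to_exclude(transform: str) -> set:
    exclude = set(ALL_DYNLAYER_KEYS)          # start from everything we own
    exclude.discard("DynamicLayerBackMode")   # back mode always stays reachable
    return exclude - KEYS_FOR_ENTERING[transform]

print(keys_to_exclude("Vmap"))  # everything except Batched and the back-mode key
```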
1,578
346
import json
import logging
import os
import shutil

from joblib import Parallel, delayed
from selenium.webdriver import Chrome

from .CompanyScraper import CompanyScraper
from .ConnectionScraper import ConnectionScraper
from .ProfileScraper import ProfileScraper
from .utils import HEADLESS_OPTIONS, split_lists

logger = logging.getLogger(__name__)


def scrape_in_parallel(
    scraper_type,
    items,
    output_file,
    num_instances,
    temp_dir='tmp_data',
    driver=Chrome,
    driver_options=HEADLESS_OPTIONS,
    **kwargs
):
    chunked_items = split_lists(items, num_instances)
    os.mkdir(temp_dir)
    Parallel(n_jobs=num_instances)(delayed(scrape_job)(
        scraper_type=scraper_type,
        output_file=temp_dir + '/{}.json'.format(i),
        items=chunked_items[i],
        driver=driver,
        driver_options=driver_options,
        **kwargs
    ) for i in range(num_instances))

    all_data = {}
    for i in range(num_instances):
        with open(temp_dir + '/{}.json'.format(i), 'r') as data:
            all_data.update(json.load(data))

    if output_file:
        with open(output_file, 'w') as out:
            json.dump(all_data, out)
    shutil.rmtree(temp_dir)
    return all_data


def scrape_job(scraper_type, items, output_file, **scraper_kwargs):
    scraper = scraper_type(**scraper_kwargs)
    data = {}
    for item in items:
        try:
            if scraper_type == CompanyScraper:
                data[item] = scraper.scrape(company=item).to_dict()
            elif scraper_type == ConnectionScraper:
                data[item] = scraper.scrape(user=item)
            elif scraper_type == ProfileScraper:
                data[item] = scraper.scrape(user=item).to_dict()
        except Exception as e:
            logger.exception("%s could not be scraped: %s", item, e)
    with open(output_file, 'w') as out:
        json.dump(data, out)
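A hypothetical call site for scrape_in_parallel as defined above. The profile identifiers are placeholders, and any scraper-specific keyword arguments (credentials, timeouts) would be forwarded through **kwargs to the ProfileScraper constructor; none of those values are part of the module itself.

```python
# Hypothetical usage sketch -- argument values are placeholders.
profiles = ["user-a", "user-b", "user-c", "user-d"]

results = scrape_in_parallel(
    scraper_type=ProfileScraper,
    items=profiles,
    output_file="profiles.json",
    num_instances=2,   # two browser instances scraping chunks in parallel
)
print(len(results))    # merged dict keyed by profile identifier
```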
828
1,519
<reponame>newjunwei/llvm-tutor //============================================================================= // FILE: // MergeBB.cpp // // DESCRIPTION: // Merges identical basic blocks into one. As an example, consider the CFG // below. After the transformation, BB1 is merged into BB2. // ---------------------------------------------------------------------------- // CFG BEFORE: CFG AFTER: // ---------------------------------------------------------------------------- // [BB0] [other BBs] [BB0] [other BBs] | // \ | \ | | // [BB1][BB2] [other BBs] ---[BB2] [other BBs] | // \ | / | / | // [ BBsucc ] [ BBsucc ] | // / | \ / | \ V // ---------------------------------------------------------------------------- // Only qualifying basic blocks are merged. The edge(s) from (potentially // multiple) BB0 to BB1, must be one of the following instructions: // * conditional branch // * unconditional branch, and // * switch // For the edges from BB1 to BBsucc and BB2 to BBsucc, only unconditional // branch instructions are allowed. Finally, BB1 is identical to BB2 iff all // instructions in BB1 are identical to the instructions in BB2. For finer // details please consult the implementation. // // This pass will to some extent revert the modifications introduced by // DuplicateBB. The qualifying clones (lt-clone-1-BBId and lt-clone-2-BBid) // *will indeed* be merged, but the lt-if-then-else and lt-tail blocks (also // introduced by DuplicateBB) will be updated, but not removed. Please keep // this in mind when running the passes in a chain. // // USAGE: // 1. Legacy Pass Manager: // $ opt -load <BUILD_DIR>/lib/libMergeBB.so `\` // -legacy-merge-bb -S <bitcode-file> // 2. New Pass Manager // $ opt -load-pass-plugin <BUILD_DIR>/lib/libMergeBB.so `\` // -passes=merge-bb -S <bitcode-file> // // License: MIT //============================================================================= #include "MergeBB.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/Passes/PassBuilder.h" #include "llvm/Passes/PassPlugin.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/Debug.h" using namespace llvm; #define DEBUG_TYPE "MergeBB" STATISTIC(NumDedupBBs, "Number of basic blocks merged"); STATISTIC(OverallNumOfUpdatedBranchTargets, "Number of updated branch targets"); //----------------------------------------------------------------------------- // MergeBB Implementation //----------------------------------------------------------------------------- bool MergeBB::canRemoveInst(const Instruction *Inst) { assert(Inst->hasOneUse() && "Inst needs to have exactly one use"); auto *PNUse = dyn_cast<PHINode>(*Inst->user_begin()); auto *Succ = Inst->getParent()->getTerminator()->getSuccessor(0); auto *User = cast<Instruction>(*Inst->user_begin()); bool SameParentBB = (User->getParent() == Inst->getParent()); bool UsedInPhi = (PNUse && PNUse->getParent() == Succ && PNUse->getIncomingValueForBlock(Inst->getParent()) == Inst); return UsedInPhi || SameParentBB; } bool MergeBB::canMergeInstructions(ArrayRef<Instruction *> Insts) { const Instruction *Inst1 = Insts[0]; const Instruction *Inst2 = Insts[1]; if (!Inst1->isSameOperationAs(Inst2)) return false; // Each instruction must have exactly zero or one use. bool HasUse = !Inst1->user_empty(); for (auto *I : Insts) { if (HasUse && !I->hasOneUse()) return false; if (!HasUse && !I->user_empty()) return false; } // Not all instructions that have one use can be merged. 
Make sure that // instructions that have one use can be safely deleted. if (HasUse) { if (!canRemoveInst(Inst1) || !canRemoveInst(Inst2)) return false; } // Make sure that Inst1 and Inst2 have identical operands. assert(Inst2->getNumOperands() == Inst1->getNumOperands()); auto NumOpnds = Inst1->getNumOperands(); for (unsigned OpndIdx = 0; OpndIdx != NumOpnds; ++OpndIdx) { if (Inst2->getOperand(OpndIdx) != Inst1->getOperand(OpndIdx)) return false; } return true; } // Get the number of non-debug instructions in BB static unsigned getNumNonDbgInstrInBB(BasicBlock *BB) { unsigned Count = 0; for (Instruction &Instr : *BB) if (!isa<DbgInfoIntrinsic>(Instr)) Count++; return Count; } unsigned MergeBB::updateBranchTargets(BasicBlock *BBToErase, BasicBlock *BBToRetain) { SmallVector<BasicBlock *, 8> BBToUpdate(predecessors(BBToErase)); LLVM_DEBUG(dbgs() << "DEDUP BB: merging duplicated blocks (" << BBToErase->getName() << " into " << BBToRetain->getName() << ")\n"); unsigned UpdatedTargetsCount = 0; for (BasicBlock *BB0 : BBToUpdate) { // The terminator is either a branch (conditional or unconditional) or a // switch statement. One of its targets should be BBToErase. Replace // that target with BBToRetain. Instruction *Term = BB0->getTerminator(); for (unsigned OpIdx = 0, NumOpnds = Term->getNumOperands(); OpIdx != NumOpnds; ++OpIdx) { if (Term->getOperand(OpIdx) == BBToErase) { Term->setOperand(OpIdx, BBToRetain); UpdatedTargetsCount++; } } } return UpdatedTargetsCount; } bool MergeBB::mergeDuplicatedBlock(BasicBlock *BB1, SmallPtrSet<BasicBlock *, 8> &DeleteList) { // Do not optimize the entry block if (BB1 == &BB1->getParent()->getEntryBlock()) return false; // Only merge CFG edges of unconditional branch BranchInst *BB1Term = dyn_cast<BranchInst>(BB1->getTerminator()); if (!(BB1Term && BB1Term->isUnconditional())) return false; // Do not optimize non-branch and non-switch CFG edges (to keep things // relatively simple) for (auto *B : predecessors(BB1)) if (!(isa<BranchInst>(B->getTerminator()) || isa<SwitchInst>(B->getTerminator()))) return false; BasicBlock *BBSucc = BB1Term->getSuccessor(0); BasicBlock::iterator II = BBSucc->begin(); const PHINode *PN = dyn_cast<PHINode>(II); Value *InValBB1 = nullptr; Instruction *InInstBB1 = nullptr; BBSucc->getFirstNonPHI(); if (nullptr != PN) { // Do not optimize if multiple PHI instructions exist in the successor (to // keep things relatively simple) if (++II != BBSucc->end() && isa<PHINode>(II)) return false; InValBB1 = PN->getIncomingValueForBlock(BB1); InInstBB1 = dyn_cast<Instruction>(InValBB1); } unsigned BB1NumInst = getNumNonDbgInstrInBB(BB1); for (auto *BB2 : predecessors(BBSucc)) { // Do not optimize the entry block if (BB2 == &BB2->getParent()->getEntryBlock()) continue; // Only merge CFG edges of unconditional branch BranchInst *BB2Term = dyn_cast<BranchInst>(BB2->getTerminator()); if (!(BB2Term && BB2Term->isUnconditional())) continue; // Do not optimize non-branch and non-switch CFG edges (to keep things // relatively simple) for (auto *B : predecessors(BB2)) if (!(isa<BranchInst>(B->getTerminator()) || isa<SwitchInst>(B->getTerminator()))) continue; // Skip basic blocks that have already been marked for merging if (DeleteList.end() != DeleteList.find(BB2)) continue; // Make sure that BB2 != BB1 if (BB2 == BB1) continue; // BB1 and BB2 are definitely different if the number of instructions is // not identical if (BB1NumInst != getNumNonDbgInstrInBB(BB2)) continue; // Control flow can be merged if incoming values to the PHI node // at the 
successor are same values or both defined in the BBs to merge. // For the latter case, canMergeInstructions executes further analysis. if (nullptr != PN) { Value *InValBB2 = PN->getIncomingValueForBlock(BB2); Instruction *InInstBB2 = dyn_cast<Instruction>(InValBB2); bool areValuesSimilar = (InValBB1 == InValBB2); bool bothValuesDefinedInParent = ((InInstBB1 && InInstBB1->getParent() == BB1) && (InInstBB2 && InInstBB2->getParent() == BB2)); if (!areValuesSimilar && !bothValuesDefinedInParent) continue; } // Finally, check that all instructions in BB1 and BB2 are identical LockstepReverseIterator LRI(BB1, BB2); while (LRI.isValid() && canMergeInstructions(*LRI)) { --LRI; } // Valid iterator means that a mismatch was found in middle of BB if (LRI.isValid()) continue; // It is safe to de-duplicate - do so. unsigned UpdatedTargets = updateBranchTargets(BB1, BB2); assert(UpdatedTargets && "No branch target was updated"); OverallNumOfUpdatedBranchTargets += UpdatedTargets; DeleteList.insert(BB1); NumDedupBBs++; return true; } return false; } PreservedAnalyses MergeBB::run(llvm::Function &Func, llvm::FunctionAnalysisManager &) { bool Changed = false; SmallPtrSet<BasicBlock *, 8> DeleteList; for (auto &BB : Func) { Changed |= mergeDuplicatedBlock(&BB, DeleteList); } for (BasicBlock *BB : DeleteList) { DeleteDeadBlock(BB); } return (Changed ? llvm::PreservedAnalyses::none() : llvm::PreservedAnalyses::all()); } bool LegacyMergeBB::runOnFunction(llvm::Function &Func) { bool Changed = false; SmallPtrSet<BasicBlock *, 8> DeleteList; for (auto &BB : Func) { Changed |= Impl.mergeDuplicatedBlock(&BB, DeleteList); } for (BasicBlock *BB : DeleteList) { DeleteDeadBlock(BB); } return Changed; } //----------------------------------------------------------------------------- // New PM Registration //----------------------------------------------------------------------------- llvm::PassPluginLibraryInfo getMergeBBPluginInfo() { return {LLVM_PLUGIN_API_VERSION, "MergeBB", LLVM_VERSION_STRING, [](PassBuilder &PB) { PB.registerPipelineParsingCallback( [](StringRef Name, FunctionPassManager &FPM, ArrayRef<PassBuilder::PipelineElement>) { if (Name == "merge-bb") { FPM.addPass(MergeBB()); return true; } return false; }); }}; } extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() { return getMergeBBPluginInfo(); } //----------------------------------------------------------------------------- // Legacy PM Registration //----------------------------------------------------------------------------- char LegacyMergeBB::ID = 0; static RegisterPass<LegacyMergeBB> X(/*PassArg=*/"legacy-merge-bb", /*Name=*/"Mixed Boolean Arithmetic Substitution", /*CFGOnly=*/false, /*is_analysis=*/false); //------------------------------------------------------------------------------ // Helper data structures //------------------------------------------------------------------------------ LockstepReverseIterator::LockstepReverseIterator(BasicBlock *BB1In, BasicBlock *BB2In) : BB1(BB1In), BB2(BB2In), Fail(false) { Insts.clear(); Instruction *InstBB1 = getLastNonDbgInst(BB1); if (nullptr == InstBB1) Fail = true; Instruction *InstBB2 = getLastNonDbgInst(BB2); if (nullptr == InstBB2) Fail = true; Insts.push_back(InstBB1); Insts.push_back(InstBB2); } Instruction *LockstepReverseIterator::getLastNonDbgInst(BasicBlock *BB) { Instruction *Inst = BB->getTerminator(); do { Inst = Inst->getPrevNode(); } while (Inst && isa<DbgInfoIntrinsic>(Inst)); return Inst; } void LockstepReverseIterator::operator--() { if (Fail) 
return; for (auto *&Inst : Insts) { do { Inst = Inst->getPrevNode(); } while (Inst && isa<DbgInfoIntrinsic>(Inst)); if (!Inst) { // Already at the beginning of BB Fail = true; return; } } }
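Outside of LLVM IR, the merging idea can be shown on a toy CFG: if two blocks carry identical instruction lists and branch unconditionally to the same successor, keep one and retarget every predecessor edge of the other. This is only a conceptual Python sketch of the transformation MergeBB performs, not a translation of the pass:

```python
# Toy CFG: block name -> (list of instructions, successor name or None).
cfg = {
    "BB0":    (["br"], "BB1"),
    "BBx":    (["br"], "BB2"),
    "BB1":    (["a = x + 1", "br"], "BBsucc"),
    "BB2":    (["a = x + 1", "br"], "BBsucc"),
    "BBsucc": (["ret"], None),
}

def merge_identical_blocks(cfg):
    merged = True
    while merged:
        merged = False
        names = list(cfg)
        for i, b1 in enumerate(names):
            for b2 in names[i + 1:]:
                if cfg[b1] == cfg[b2]:  # same instructions, same successor
                    # Retarget every predecessor of b2 to b1, then drop b2.
                    for name, (insts, succ) in cfg.items():
                        if succ == b2:
                            cfg[name] = (insts, b1)
                    del cfg[b2]
                    merged = True
                    break
            if merged:
                break
    return cfg

print(merge_identical_blocks(cfg))  # BB2 is folded into BB1; BBx now branches to BB1
```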
4,644
9,680
<filename>test/vso_tools/build_wheel.py
from _common import build_wheel

build_wheel()
30
2,177
/**
 * Handling for mapList.
 */
package org.nutz.mapl;
27
72,551
<filename>test/ScanDependencies/Inputs/CHeaders/H.h
#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000
#include "I.h"
#endif

void funcH(void);
67
2,151
/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.service.notification; import android.annotation.SdkConstant; import android.annotation.SystemApi; import android.content.ComponentName; import android.content.Context; import android.content.Intent; import android.net.Uri; import android.os.Bundle; import android.os.Handler; import android.os.IBinder; import android.os.Looper; import android.os.Message; import android.os.Parcel; import android.os.Parcelable; import android.os.RemoteException; import android.util.Log; import com.android.internal.os.SomeArgs; import java.util.List; /** * A service that helps the user manage notifications. This class is only used to * extend the framework service and may not be implemented by non-framework components. * @hide */ @SystemApi public abstract class NotificationRankerService extends NotificationListenerService { private static final String TAG = "NotificationRankers"; /** * The {@link Intent} that must be declared as handled by the service. */ @SdkConstant(SdkConstant.SdkConstantType.SERVICE_ACTION) public static final String SERVICE_INTERFACE = "android.service.notification.NotificationRankerService"; /** Notification was canceled by the status bar reporting a click. */ public static final int REASON_DELEGATE_CLICK = 1; /** Notification was canceled by the status bar reporting a user dismissal. */ public static final int REASON_DELEGATE_CANCEL = 2; /** Notification was canceled by the status bar reporting a user dismiss all. */ public static final int REASON_DELEGATE_CANCEL_ALL = 3; /** Notification was canceled by the status bar reporting an inflation error. */ public static final int REASON_DELEGATE_ERROR = 4; /** Notification was canceled by the package manager modifying the package. */ public static final int REASON_PACKAGE_CHANGED = 5; /** Notification was canceled by the owning user context being stopped. */ public static final int REASON_USER_STOPPED = 6; /** Notification was canceled by the user banning the package. */ public static final int REASON_PACKAGE_BANNED = 7; /** Notification was canceled by the app canceling this specific notification. */ public static final int REASON_APP_CANCEL = 8; /** Notification was canceled by the app cancelling all its notifications. */ public static final int REASON_APP_CANCEL_ALL = 9; /** Notification was canceled by a listener reporting a user dismissal. */ public static final int REASON_LISTENER_CANCEL = 10; /** Notification was canceled by a listener reporting a user dismiss all. */ public static final int REASON_LISTENER_CANCEL_ALL = 11; /** Notification was canceled because it was a member of a canceled group. */ public static final int REASON_GROUP_SUMMARY_CANCELED = 12; /** Notification was canceled because it was an invisible member of a group. */ public static final int REASON_GROUP_OPTIMIZATION = 13; /** Notification was canceled by the device administrator suspending the package. 
*/ public static final int REASON_PACKAGE_SUSPENDED = 14; /** Notification was canceled by the owning managed profile being turned off. */ public static final int REASON_PROFILE_TURNED_OFF = 15; /** Autobundled summary notification was canceled because its group was unbundled */ public static final int REASON_UNAUTOBUNDLED = 16; private Handler mHandler; /** @hide */ @Override public void registerAsSystemService(Context context, ComponentName componentName, int currentUser) { throw new UnsupportedOperationException("the ranker lifecycle is managed by the system."); } /** @hide */ @Override public void unregisterAsSystemService() { throw new UnsupportedOperationException("the ranker lifecycle is managed by the system."); } @Override protected void attachBaseContext(Context base) { super.attachBaseContext(base); mHandler = new MyHandler(getContext().getMainLooper()); } @Override public final IBinder onBind(Intent intent) { if (mWrapper == null) { mWrapper = new NotificationRankingServiceWrapper(); } return mWrapper; } /** * A notification was posted by an app. Called before alert. * * @param sbn the new notification * @param importance the initial importance of the notification. * @param user true if the initial importance reflects an explicit user preference. * @return an adjustment or null to take no action, within 100ms. */ abstract public Adjustment onNotificationEnqueued(StatusBarNotification sbn, int importance, boolean user); /** * The visibility of a notification has changed. * * @param key the notification key * @param time milliseconds since midnight, January 1, 1970 UTC. * @param visible true if the notification became visible, false if hidden. */ public void onNotificationVisibilityChanged(String key, long time, boolean visible) { // Do nothing, Override this to collect visibility statistics. } /** * The user clicked on a notification. * * @param key the notification key * @param time milliseconds since midnight, January 1, 1970 UTC. */ public void onNotificationClick(String key, long time) { // Do nothing, Override this to collect click statistics } /** * The user clicked on a notification action. * * @param key the notification key * @param time milliseconds since midnight, January 1, 1970 UTC. * @param actionIndex the index of the action button that was pressed. */ public void onNotificationActionClick(String key, long time, int actionIndex) { // Do nothing, Override this to collect action button click statistics } /** * A notification was removed. * @param key the notification key * @param time milliseconds since midnight, January 1, 1970 UTC. * @param reason see {@link #REASON_LISTENER_CANCEL}, etc. */ public void onNotificationRemoved(String key, long time, int reason) { // Do nothing, Override this to collect dismissal statistics } /** * Updates a notification. N.B. this won’t cause * an existing notification to alert, but might allow a future update to * this notification to alert. * * @param adjustment the adjustment with an explanation */ public final void adjustNotification(Adjustment adjustment) { if (!isBound()) return; try { getNotificationInterface().applyAdjustmentFromRankerService(mWrapper, adjustment); } catch (android.os.RemoteException ex) { Log.v(TAG, "Unable to contact notification manager", ex); } } /** * Updates existing notifications. Re-ranking won't occur until all adjustments are applied. * N.B. this won’t cause an existing notification to alert, but might allow a future update to * these notifications to alert. 
* * @param adjustments a list of adjustments with explanations */ public final void adjustNotifications(List<Adjustment> adjustments) { if (!isBound()) return; try { getNotificationInterface().applyAdjustmentsFromRankerService(mWrapper, adjustments); } catch (android.os.RemoteException ex) { Log.v(TAG, "Unable to contact notification manager", ex); } } private class NotificationRankingServiceWrapper extends NotificationListenerWrapper { @Override public void onNotificationEnqueued(IStatusBarNotificationHolder sbnHolder, int importance, boolean user) { StatusBarNotification sbn; try { sbn = sbnHolder.get(); } catch (RemoteException e) { Log.w(TAG, "onNotificationEnqueued: Error receiving StatusBarNotification", e); return; } SomeArgs args = SomeArgs.obtain(); args.arg1 = sbn; args.argi1 = importance; args.argi2 = user ? 1 : 0; mHandler.obtainMessage(MyHandler.MSG_ON_NOTIFICATION_ENQUEUED, args).sendToTarget(); } @Override public void onNotificationVisibilityChanged(String key, long time, boolean visible) { SomeArgs args = SomeArgs.obtain(); args.arg1 = key; args.arg2 = time; args.argi1 = visible ? 1 : 0; mHandler.obtainMessage(MyHandler.MSG_ON_NOTIFICATION_VISIBILITY_CHANGED, args).sendToTarget(); } @Override public void onNotificationClick(String key, long time) { SomeArgs args = SomeArgs.obtain(); args.arg1 = key; args.arg2 = time; mHandler.obtainMessage(MyHandler.MSG_ON_NOTIFICATION_CLICK, args).sendToTarget(); } @Override public void onNotificationActionClick(String key, long time, int actionIndex) { SomeArgs args = SomeArgs.obtain(); args.arg1 = key; args.arg2 = time; args.argi1 = actionIndex; mHandler.obtainMessage(MyHandler.MSG_ON_NOTIFICATION_ACTION_CLICK, args).sendToTarget(); } @Override public void onNotificationRemovedReason(String key, long time, int reason) { SomeArgs args = SomeArgs.obtain(); args.arg1 = key; args.arg2 = time; args.argi1 = reason; mHandler.obtainMessage(MyHandler.MSG_ON_NOTIFICATION_REMOVED_REASON, args).sendToTarget(); } } private final class MyHandler extends Handler { public static final int MSG_ON_NOTIFICATION_ENQUEUED = 1; public static final int MSG_ON_NOTIFICATION_VISIBILITY_CHANGED = 2; public static final int MSG_ON_NOTIFICATION_CLICK = 3; public static final int MSG_ON_NOTIFICATION_ACTION_CLICK = 4; public static final int MSG_ON_NOTIFICATION_REMOVED_REASON = 5; public MyHandler(Looper looper) { super(looper, null, false); } @Override public void handleMessage(Message msg) { switch (msg.what) { case MSG_ON_NOTIFICATION_ENQUEUED: { SomeArgs args = (SomeArgs) msg.obj; StatusBarNotification sbn = (StatusBarNotification) args.arg1; final int importance = args.argi1; final boolean user = args.argi2 == 1; args.recycle(); Adjustment adjustment = onNotificationEnqueued(sbn, importance, user); if (adjustment != null) { adjustNotification(adjustment); } } break; case MSG_ON_NOTIFICATION_VISIBILITY_CHANGED: { SomeArgs args = (SomeArgs) msg.obj; final String key = (String) args.arg1; final long time = (long) args.arg2; final boolean visible = args.argi1 == 1; args.recycle(); onNotificationVisibilityChanged(key, time, visible); } break; case MSG_ON_NOTIFICATION_CLICK: { SomeArgs args = (SomeArgs) msg.obj; final String key = (String) args.arg1; final long time = (long) args.arg2; args.recycle(); onNotificationClick(key, time); } break; case MSG_ON_NOTIFICATION_ACTION_CLICK: { SomeArgs args = (SomeArgs) msg.obj; final String key = (String) args.arg1; final long time = (long) args.arg2; final int actionIndex = args.argi1; args.recycle(); 
onNotificationActionClick(key, time, actionIndex); } break; case MSG_ON_NOTIFICATION_REMOVED_REASON: { SomeArgs args = (SomeArgs) msg.obj; final String key = (String) args.arg1; final long time = (long) args.arg2; final int reason = args.argi1; args.recycle(); onNotificationRemoved(key, time, reason); } break; } } } }
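The NotificationRankingServiceWrapper/MyHandler pair above follows one pattern throughout: binder callbacks only marshal their arguments into a message, and a handler on the main looper unpacks them and invokes the real hook. A language-neutral Python analogue of that pattern, unrelated to the Android framework classes and purely illustrative:

```python
# Illustrative analogue of the SomeArgs/Handler pattern: callbacks enqueue
# messages; a single worker thread unpacks them and calls the real hooks.
import queue
import threading

MSG_ENQUEUED, MSG_CLICK = 1, 2
inbox = queue.Queue()

def on_notification_enqueued(sbn, importance, user):
    print("ranking", sbn, importance, user)

def on_notification_click(key, time):
    print("clicked", key, "at", time)

def handler_loop():
    while True:
        what, args = inbox.get()
        if what is None:            # shutdown sentinel
            break
        if what == MSG_ENQUEUED:
            on_notification_enqueued(*args)
        elif what == MSG_CLICK:
            on_notification_click(*args)

worker = threading.Thread(target=handler_loop)
worker.start()

# Binder-side callbacks would just post messages:
inbox.put((MSG_ENQUEUED, ("sbn-1", 3, True)))
inbox.put((MSG_CLICK, ("key-1", 1234567890)))
inbox.put((None, ()))
worker.join()
```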
5,163
799
<gh_stars>100-1000 JOB_TEMPLATE_LAUNCH_RES = { "id": 1, "type": "job", "url": "/api/example", "created": "2020-12-29T14:38:47.792496Z", "modified": "2020-12-29T14:38:47.889460Z", "name": "<NAME>", "description": "", "job_type": "run", "inventory": 1, "project": 4, "playbook": "example.yml", "forks": 0, "limit": "", "verbosity": 0, "extra_vars": "{}", "job_tags": "", "force_handlers": False, "skip_tags": "", "timeout": 0, "use_fact_cache": False, "unified_job_template": 5, "launch_type": "manual", "status": "pending", "failed": False, "started": None, "finished": None, "elapsed": 0.0, "job_args": "", "job_cwd": "", "job_env": {}, "job_explanation": "", "execution_node": "", "controller_node": "", "result_traceback": "", "event_processing_finished": False, "job_template": 5, "passwords_needed_to_start": [], "ask_diff_mode_on_launch": False, "ask_variables_on_launch": False, "ask_limit_on_launch": False, "ask_tags_on_launch": False, "ask_skip_tags_on_launch": False, "ask_job_type_on_launch": False, "ask_verbosity_on_launch": False, "ask_inventory_on_launch": False, "ask_credential_on_launch": False, "allow_simultaneous": False, "artifacts": {}, "scm_revision": "", "instance_group": None, "diff_mode": False, "job_slice_number": 0, "job_slice_count": 1, "credential": 1, "vault_credential": None } JOB_TEMPLATE_EXPECTED = { 'artifacts': {}, 'controller_node': '', 'created': '2020-12-29 14:38:47.792496', 'credential': 1, 'description': '', 'elapsed': 0.0, 'execution_node': '', 'extra_vars': '{}', 'failed': False, 'finished': None, 'id': 1, 'instance_group': None, 'inventory': 1, 'job_args': '', 'job_cwd': '', 'job_explanation': '', 'job_tags': '', 'job_template': 5, 'job_type': 'run', 'launch_type': 'manual', 'limit': '', 'modified': '2020-12-29 14:38:47.889460', 'name': 'job example', 'passwords_needed_to_start': [], 'playbook': 'example.yml', 'project': 4, 'result_traceback': '', 'scm_revision': '', 'skip_tags': '', 'started': None, 'status': 'pending', 'timeout': 0, 'type': 'job', 'url': '/api/example', 'vault_credential': None} ADHOC_COMMAND_LAUNCH_RES = { "id": 1, "type": "ad_hoc_command", "url": "/api/v2/ad_hoc_commands/1/", "created": "2020-12-23T11:41:26.600084Z", "modified": "2020-12-23T11:41:26.636218Z", "name": "ping", "launch_type": "manual", "status": "pending", "failed": False, "started": None, "finished": None, "elapsed": 0.0, "job_args": "", "job_cwd": "", "job_explanation": "", "execution_node": "", "controller_node": "", "result_traceback": "", "event_processing_finished": False, "job_type": "run", "inventory": 1, "limit": "", "credential": 1, "module_name": "ping", "module_args": "", "forks": 0, "verbosity": 0, "extra_vars": "", "become_enabled": False, "diff_mode": False, "ad_hoc_command": 1 } ADHOC_COMMAND_LAUNCH_EXPECTED = \ {'ad_hoc_command': 1, 'become_enabled': False, 'controller_node': '', 'created': '2020-12-23 11:41:26.600084', 'credential': 1, 'elapsed': 0.0, 'execution_node': '', 'extra_vars': '', 'failed': False, 'finished': None, 'id': 1, 'inventory': 1, 'job_args': '', 'job_cwd': '', 'job_explanation': '', 'job_type': 'run', 'launch_type': 'manual', 'limit': '', 'modified': '2020-12-23 11:41:26.636218', 'module_args': '', 'module_name': 'ping', 'name': 'ping', 'result_traceback': '', 'started': None, 'status': 'pending', 'type': 'ad_hoc_command', 'url': '/api/v2/ad_hoc_commands/1/'}
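The paired dicts above read as raw-API-response versus post-processed fixtures: timestamps are reformatted ("2020-12-29T14:38:47.792496Z" becomes "2020-12-29 14:38:47.792496") and a number of raw-only fields are dropped. A hedged sketch of such a normalization step; the helper name and the exact field list are assumptions for illustration, not code from the fixture module:

```python
# Hypothetical normalizer shaping JOB_TEMPLATE_LAUNCH_RES like JOB_TEMPLATE_EXPECTED.
from datetime import datetime

DROPPED_FIELDS = {"forks", "verbosity", "force_handlers", "use_fact_cache",
                  "unified_job_template", "event_processing_finished"}

def normalize_launch_result(res: dict) -> dict:
    out = {}
    for key, value in res.items():
        if key in DROPPED_FIELDS or key.startswith("ask_") or key.startswith("allow_"):
            continue
        if key in ("created", "modified") and isinstance(value, str):
            # "2020-12-29T14:38:47.792496Z" -> "2020-12-29 14:38:47.792496"
            value = datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%fZ") \
                        .strftime("%Y-%m-%d %H:%M:%S.%f")
        out[key] = value
    return out
```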
1,938
665
<reponame>mldbai/mldb /************************************************************************* (c) Copyright 2003 <NAME> adapted from SVDPACKC, which is (c) Copyright 1993 University of Tennessee All Rights Reserved *************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <math.h> #include <fcntl.h> #include "svdlib.h" #include "svdutil.h" #include <iostream> #include <functional> #include <cmath> using namespace std; void svdParallelMapNaive(size_t first, size_t last, const std::function<void (size_t)> & doWork, int maxOccupancy) { for (size_t i = first; i < last; ++i) { doWork(i); } } void (*svdParallelMap) (size_t first, size_t last, const std::function<void (size_t)> & doWork, int maxOccupancy) = svdParallelMapNaive; void purge(long n, long ll, double *r, double *q, double *ra, double *qa, double *wrk, double *eta, double *oldeta, long step, double *rnmp, double tol, SVDParams & Params); void ortbnd(double *alf, double *eta, double *oldeta, double *bet, long step, double rnm, SVDParams & params); double startv(double *wptr[], long step, long n, SVDParams & params); //void store(long, long, long, double *); void imtql2(long, long, double *, double *, double *, SVDParams & params); void imtqlb(long n, double d[], double e[], double bnd[], SVDParams & params); void write_header(long, long, double, double, long, double, long, long, long); long check_parameters(SMat A, long dimensions, long iterations, double endl, double endr, long vectors); int lanso(long iterations, long dimensions, double endl, double endr, double *ritz, double *bnd, double *wptr[], long *neigp, long n, SVDParams & params); long ritvec(long n, SVDRec R, double kappa, double *ritz, double *bnd, double *alf, double *bet, double *w2, long steps, long neig, SVDParams & params); long lanczos_step(long first, long last, double *wptr[], double *alf, double *eta, double *oldeta, double *bet, long *ll, long *enough, double *rnmp, double *tolp, long n, SVDParams & params); void stpone(double *wrkptr[], double *rnmp, double *tolp, long n, SVDParams & params); long error_bound(long *, double, double, double *, double *, long step, double tol, SVDParams & params); // Machine params; only need to be done once long ibeta, it, irnd, machep, negep; double eps; void machar(long *ibeta, long *it, long *irnd, long *machep, long *negep, double *eps); struct AtInit { AtInit() { machar(&ibeta, &it, &irnd, &machep, &negep, &eps); } } atInit; /*********************************************************************** * * * main() * * Sparse SVD(A) via Eigensystem of A'A symmetric Matrix * * (double precision) * * * ***********************************************************************/ /*********************************************************************** Description ----------- This sample program uses landr to compute singular triplets of A via the equivalent symmetric eigenvalue problem B x = lambda x, where x' = (u',v'), lambda = sigma**2, where sigma is a singular value of A, B = A'A , and A is m (nrow) by n (ncol) (nrow >> ncol), so that {u,sqrt(lambda),v} is a singular triplet of A. (A' = transpose of A) User supplied routines: svd_opa, opb, store, timer svd_opa( x,y) takes an n-vector x and returns A*x in y. svd_opb(ncol,x,y) takes an n-vector x and returns B*x in y. Based on operation flag isw, store(n,isw,j,s) stores/retrieves to/from storage a vector of length n in s. 
User should edit timer() with an appropriate call to an intrinsic timing routine that returns elapsed user time. External parameters ------------------- Defined and documented in las2.h Local parameters ---------------- (input) endl left end of interval containing unwanted eigenvalues of B endr right end of interval containing unwanted eigenvalues of B kappa relative accuracy of ritz values acceptable as eigenvalues of B vectors is not equal to 1 r work array n dimension of the eigenproblem for matrix B (ncol) dimensions upper limit of desired number of singular triplets of A iterations upper limit of desired number of Lanczos steps nnzero number of nonzeros in A vectors 1 indicates both singular values and singular vectors are wanted and they can be found in output file lav2; 0 indicates only singular values are wanted (output) ritz array of ritz values bnd array of error bounds d array of singular values memory total memory allocated in bytes to solve the B-eigenproblem Functions used -------------- BLAS svd_daxpy, svd_dscal, svd_ddot USER svd_opa, svd_opb, timer MISC write_header, check_parameters LAS2 landr Precision --------- All floating-point calculations are done in double precision; variables are declared as long and double. LAS2 development ---------------- LAS2 is a C translation of the Fortran-77 LAS2 from the SVDPACK library written by <NAME>, University of Tennessee, Dept. of Computer Science, 107 Ayres Hall, Knoxville, TN, 37996-1301 31 Jan 1992: Date written <NAME> University of Tennessee Dept. of Computer Science 107 Ayres Hall Knoxville, TN, 37996-1301 internet: <EMAIL> ***********************************************************************/ /*********************************************************************** * * * write_header() * * Function writes out header of output file containing ritz values * * * ***********************************************************************/ void write_header(long iterations, long dimensions, double endl, double endr, long vectors, double kappa, long nrow, long ncol, long vals) { printf("SOLVING THE [A^TA] EIGENPROBLEM\n"); printf("NO. OF ROWS = %6ld\n", nrow); printf("NO. OF COLUMNS = %6ld\n", ncol); printf("NO. OF NON-ZERO VALUES = %6ld\n", vals); printf("MATRIX DENSITY = %6.2f%%\n", ((float) vals / nrow) * 100 / ncol); /* printf("ORDER OF MATRIX A = %5ld\n", n); */ printf("MAX. NO. OF LANCZOS STEPS = %6ld\n", iterations); printf("MAX. NO. OF EIGENPAIRS = %6ld\n", dimensions); printf("LEFT END OF THE INTERVAL = %9.2E\n", endl); printf("RIGHT END OF THE INTERVAL = %9.2E\n", endr); printf("KAPPA = %9.2E\n", kappa); /* printf("WANT S-VECTORS? [T/F] = %c\n", (vectors) ? 
'T' : 'F'); */ printf("\n"); return; } /*********************************************************************** * * * landr() * * Lanczos algorithm with selective orthogonalization * * Using Simon's Recurrence * * (double precision) * * * ***********************************************************************/ /*********************************************************************** Description ----------- landr() is the LAS2 driver routine that, upon entry, (1) checks for the validity of input parameters of the B-eigenproblem (2) determines several machine constants (3) makes a Lanczos run (4) calculates B-eigenvectors (singular vectors of A) if requested by user arguments --------- (input) n dimension of the eigenproblem for A'A iterations upper limit of desired number of Lanczos steps dimensions upper limit of desired number of eigenpairs nnzero number of nonzeros in matrix A endl left end of interval containing unwanted eigenvalues of B endr right end of interval containing unwanted eigenvalues of B vectors 1 indicates both eigenvalues and eigenvectors are wanted and they can be found in output file lav2; 0 indicates only eigenvalues are wanted kappa relative accuracy of ritz values acceptable as eigenvalues of B (singular values of A) r work array (output) j number of Lanczos steps actually taken neig number of ritz values stabilized ritz array to hold the ritz values bnd array to hold the error bounds External parameters ------------------- Defined and documented in las2.h local parameters ------------------- ibeta radix for the floating-point representation it number of base ibeta digits in the floating-point significand irnd floating-point addition rounded or chopped machep machine relative precision or round-off error negeps largest negative integer wptr array of pointers each pointing to a work space Functions used -------------- MISC svd_dmax, machar, check_parameters LAS2 ritvec, lanso ***********************************************************************/ SVDRec svdLAS2A(long dimensions, SVDParams & params) { double end[2] = {-1.0e-30, 1.0e-30}; double kappa = 1e-6; return svdLAS2(dimensions, 0, end, kappa, params); } SVDRec svdLAS2(long dimensions, long iterations, double end[2], double kappa, SVDParams & params) { long i, steps, neig, m; double *wptr[10], *ritz, *bnd; SVDRec R = NULL; params.ierr = 0; // reset the global error flag svdResetCounters(); m = svd_imin(params.nrows, params.ncols); if (dimensions <= 0 || dimensions > m) dimensions = m; if (iterations <= 0 || iterations > m) iterations = m; if (iterations < dimensions) iterations = dimensions; /* Write output header */ if (SVDVerbosity > 0) write_header(iterations, dimensions, end[0], end[1], true, kappa, params.nrows, params.ncols, params.nvals); int n = params.ncols; //cerr << "ibeta = " << ibeta << " it = " << it << " irnd = " // << irnd << " machep = " << machep << " negep = " << negep // << " eps1 = " << params.eps1 << " reps = " << params.reps << " eps34 = " // << params.eps34 << " eps " << eps << endl; /* Allocate temporary space. 
*/ if (!(wptr[0] = svd_doubleArray(n, true, "las2: wptr[0]"))) goto abort; if (!(wptr[1] = svd_doubleArray(n, false, "las2: wptr[1]"))) goto abort; if (!(wptr[2] = svd_doubleArray(n, false, "las2: wptr[2]"))) goto abort; if (!(wptr[3] = svd_doubleArray(n, false, "las2: wptr[3]"))) goto abort; if (!(wptr[4] = svd_doubleArray(n, false, "las2: wptr[4]"))) goto abort; if (!(wptr[5] = svd_doubleArray(n, false, "las2: wptr[5]"))) goto abort; if (!(wptr[6] = svd_doubleArray(iterations, false, "las2: wptr[6]"))) goto abort; if (!(wptr[7] = svd_doubleArray(iterations, false, "las2: wptr[7]"))) goto abort; if (!(wptr[8] = svd_doubleArray(iterations, false, "las2: wptr[8]"))) goto abort; if (!(wptr[9] = svd_doubleArray(iterations + 1, false, "las2: wptr[9]"))) goto abort; /* Calloc may be unnecessary: */ if (!(ritz = svd_doubleArray(iterations + 1, true, "las2: ritz"))) goto abort; /* Calloc may be unnecessary: */ if (!(bnd = svd_doubleArray(iterations + 1, true, "las2: bnd"))) goto abort; memset(bnd, 127, (iterations + 1) * sizeof(double)); if (!(params.LanStore = (double **) calloc(iterations + MAXLL, sizeof(double *)))) goto abort; /* Actually run the lanczos thing: */ steps = lanso(iterations, dimensions, end[0], end[1], ritz, bnd, wptr, &neig, n, params); /* Print some stuff. */ if (SVDVerbosity > 0) { printf("NUMBER OF LANCZOS STEPS = %6ld\n" "RITZ VALUES STABILIZED = %6ld\n", steps + 1, neig); } if (SVDVerbosity > 2) { printf("\nCOMPUTED RITZ VALUES (ERROR BNDS)\n"); for (i = 0; i <= steps; i++) printf("%3ld %22.14E (%11.2E)\n", i + 1, ritz[i], bnd[i]); } SAFE_FREE(wptr[0]); SAFE_FREE(wptr[1]); SAFE_FREE(wptr[2]); SAFE_FREE(wptr[3]); SAFE_FREE(wptr[4]); SAFE_FREE(wptr[7]); SAFE_FREE(wptr[8]); /* Compute eigenvectors */ kappa = svd_dmax(fabs(kappa), params.eps34); R = svdNewSVDRec(); if (!R) { svd_error("svdLAS2: allocation of R failed"); goto cleanup; } R->d = /*svd_imin(nsig, dimensions)*/dimensions; if (params.doU) { R->Ut = svdNewDMat(R->d, params.nrows); if (!R->Ut) { svd_error("svdLAS2: allocation of R->Ut failed"); goto cleanup; } } else R->Ut = 0; R->S = svd_doubleArray(R->d, true, "las2: R->s"); if (!R->S) { svd_error("svdLAS2: allocation of R->S failed"); goto cleanup; } R->Vt = svdNewDMat(R->d, params.ncols); if (!R->Vt) { svd_error("svdLAS2: allocation of R failed"); goto cleanup; } ritvec(n, R, kappa, ritz, bnd, wptr[6], wptr[9], wptr[5], steps, neig, params); if (SVDVerbosity > 0) printf("SINGULAR VALUES FOUND = %6d\n", R->d); cleanup: for (i = 0; i <= 9; i++) SAFE_FREE(wptr[i]); SAFE_FREE(ritz); SAFE_FREE(bnd); if (params.LanStore) { for (i = 0; i < iterations + MAXLL; i++) SAFE_FREE(params.LanStore[i]); SAFE_FREE(params.LanStore); } return R; abort: svd_error("svdLAS2: fatal error, aborting"); return NULL; } /*********************************************************************** * * * ritvec() * * Function computes the singular vectors of matrix A * * * ***********************************************************************/ /*********************************************************************** Description ----------- This function is invoked by landr() only if eigenvectors of the A'A eigenproblem are desired. When called, ritvec() computes the singular vectors of A and writes the result to an unformatted file. 
Parameters ---------- (input) nrow number of rows of A steps number of Lanczos iterations performed fp_out2 pointer to unformatted output file n dimension of matrix A kappa relative accuracy of ritz values acceptable as eigenvalues of A'A ritz array of ritz values bnd array of error bounds alf array of diagonal elements of the tridiagonal matrix T bet array of off-diagonal elements of T w1, w2 work space (output) xv1 array of eigenvectors of A'A (right singular vectors of A) ierr error code 0 for normal return from imtql2() k if convergence did not occur for k-th eigenvalue in imtql2() nsig number of accepted ritz values based on kappa (local) s work array which is initialized to the identity matrix of order (j + 1) upon calling imtql2(). After the call, s contains the orthonormal eigenvectors of the symmetric tridiagonal matrix T Functions used -------------- BLAS svd_dscal, svd_dcopy, svd_daxpy USER store imtql2 ***********************************************************************/ void rotateArray(double *a, int size, int x) { int i, j, n, start; double t1, t2; if (x == 0) return; j = start = 0; t1 = a[0]; for (i = 0; i < size; i++) { n = (j >= x) ? j - x : j + size - x; t2 = a[n]; a[n] = t1; t1 = t2; j = n; if (j == start) { start = ++j; t1 = a[j]; } } } long ritvec(long n, SVDRec R, double kappa, double *ritz, double *bnd, double *alf, double *bet, double *w2, long steps, long neig, SVDParams & params) { long js, jsq, i, k, /*size,*/ id2, tmp, nsig, x; double *s, *w1 = R->Vt->value[0]; js = steps + 1; jsq = js * js; /*size = sizeof(double) * n;*/ s = svd_doubleArray(jsq, true, "ritvec: s"); /* initialize s to an identity matrix */ for (i = 0; i < jsq; i+= (js+1)) s[i] = 1.0; svd_dcopy(js, alf, 1, w1, -1); svd_dcopy(steps, &bet[1], 1, &w2[1], -1); /* on return from imtql2(), w1 contains eigenvalues in ascending * order and s contains the corresponding eigenvectors */ imtql2(js, js, w1, w2, s, params); /*fwrite((char *)&n, sizeof(n), 1, fp_out2); fwrite((char *)&js, sizeof(js), 1, fp_out2); fwrite((char *)&kappa, sizeof(kappa), 1, fp_out2);*/ /*id = 0;*/ nsig = 0; if(params.ierr) { R->d = 0; } else { x = 0; id2 = jsq - js; for (k = 0; k < js; k++) { tmp = id2; if (bnd[k] <= kappa * fabs(ritz[k]) && k > js-neig-1) { if (--x < 0) x = R->d - 1; w1 = R->Vt->value[x]; for (i = 0; i < n; i++) w1[i] = 0.0; for (i = 0; i < js; i++) { params.store(n, RETRQ, i, w2); svd_daxpy(n, s[tmp], w2, 1, w1, 1); tmp -= js; } /*fwrite((char *)w1, size, 1, fp_out2);*/ /* store the w1 vector row-wise in array xv1; * size of xv1 is (steps+1) * (nrow+ncol) elements * and each vector, even though only ncol long, * will have (nrow+ncol) elements in xv1. * It is as if xv1 is a 2-d array (steps+1) by * (nrow+ncol) and each vector occupies a row */ /* j is the index in the R arrays, which are sorted by high to low singular values. */ /*for (i = 0; i < n; i++) R->Vt->value[x]xv1[id++] = w1[i];*/ /*id += nrow;*/ nsig++; } id2++; } /* Rotate the singular vectors and values. */ /* x is now the location of the highest singular value. 
*/ rotateArray(R->Vt->value[0], R->Vt->rows * R->Vt->cols, x * R->Vt->cols); R->d = svd_imin(R->d, nsig); auto doOutput = [&] (int x) { /* multiply by matrix B first */ double xv2[n]; params.opb(R->Vt->value[x], xv2); double tmp0 = svd_ddot(n, R->Vt->value[x], 1, xv2, 1); svd_daxpy(n, -tmp0, R->Vt->value[x], 1, xv2, 1); tmp0 = sqrt(tmp0); double xnorm = sqrt(svd_ddot(n, xv2, 1, xv2, 1)); /* multiply by matrix A to get (scaled) left s-vector */ if (params.doU) params.opa(R->Vt->value[x], R->Ut->value[x]); double tmp1 = 1.0 / tmp0; if (params.doU) svd_dscal(params.nrows, tmp1, R->Ut->value[x], 1); xnorm *= tmp1; bnd[i] = xnorm; R->S[x] = tmp0; }; svdParallelMap(0, R->d, doOutput, -1 /* max occupancy */); } SAFE_FREE(s); return nsig; } /*********************************************************************** * * * lanso() * * * ***********************************************************************/ /*********************************************************************** Description ----------- Function determines when the restart of the Lanczos algorithm should occur and when it should terminate. Arguments --------- (input) n dimension of the eigenproblem for matrix B iterations upper limit of desired number of lanczos steps dimensions upper limit of desired number of eigenpairs endl left end of interval containing unwanted eigenvalues endr right end of interval containing unwanted eigenvalues ritz array to hold the ritz values bnd array to hold the error bounds wptr array of pointers that point to work space: wptr[0]-wptr[5] six vectors of length n wptr[6] array to hold diagonal of the tridiagonal matrix T wptr[9] array to hold off-diagonal of T wptr[7] orthogonality estimate of Lanczos vectors at step j wptr[8] orthogonality estimate of Lanczos vectors at step j-1 (output) j number of Lanczos steps actually taken neig number of ritz values stabilized ritz array to hold the ritz values bnd array to hold the error bounds ierr (globally declared) error flag ierr = 8192 if stpone() fails to find a starting vector ierr = k if convergence did not occur for k-th eigenvalue in imtqlb() ierr = 0 otherwise Functions used -------------- LAS stpone, error_bound, lanczos_step MISC svd_dsort2 UTILITY svd_imin, svd_imax ***********************************************************************/ int lanso(long iterations, long dimensions, double endl, double endr, double *ritz, double *bnd, double *wptr[], long *neigp, long n, SVDParams & params) { double *alf, *eta, *oldeta, *bet, *wrk, rnm, tol; long ll, first, last, ENOUGH, id2, id3, i, l, neig, j = 0, intro = 0; alf = wptr[6]; eta = wptr[7]; oldeta = wptr[8]; bet = wptr[9]; wrk = wptr[5]; /* take the first step */ stpone(wptr, &rnm, &tol, n, params); if (!rnm || params.ierr) return 0; eta[0] = params.eps1; oldeta[0] = params.eps1; ll = 0; first = 1; last = svd_imin(dimensions + svd_imax(8, dimensions), iterations); ENOUGH = false; /*id1 = 0;*/ while (/*id1 < dimensions && */!ENOUGH) { if (rnm <= tol) rnm = 0.0; /* the actual lanczos loop */ j = lanczos_step(first, last, wptr, alf, eta, oldeta, bet, &ll, &ENOUGH, &rnm, &tol, n, params); if (ENOUGH) j = j - 1; else j = last - 1; first = j + 1; bet[j+1] = rnm; /* analyze T */ l = 0; for (id2 = 0; id2 < j; id2++) { if (l > j) break; for (i = l; i <= j; i++) if (!bet[i+1]) break; if (i > j) i = j; /* now i is at the end of an unreduced submatrix */ svd_dcopy(i-l+1, &alf[l], 1, &ritz[l], -1); svd_dcopy(i-l, &bet[l+1], 1, &wrk[l+1], -1); imtqlb(i-l+1, &ritz[l], &wrk[l], &bnd[l], params); if (params.ierr) { 
svd_error("svdLAS2: imtqlb failed to converge (ierr = %ld)\n", params.ierr); svd_error(" l = %ld i = %ld\n", l, i); for (id3 = l; id3 <= i; id3++) svd_error(" %ld %lg %lg %lg\n", id3, ritz[id3], wrk[id3], bnd[id3]); } for (id3 = l; id3 <= i; id3++) bnd[id3] = rnm * fabs(bnd[id3]); l = i + 1; } /* sort eigenvalues into increasing order */ svd_dsort2((j+1) / 2, j + 1, ritz, bnd); /* for (i = 0; i < iterations; i++) printf("%f ", ritz[i]); printf("\n"); */ /* massage error bounds for very close ritz values */ neig = error_bound(&ENOUGH, endl, endr, ritz, bnd, j, tol, params); *neigp = neig; /* should we stop? */ if (neig < dimensions) { if (!neig) { last = first + 9; intro = first; } else last = first + svd_imax(3, 1 + ((j - intro) * (dimensions-neig)) / neig); last = svd_imin(last, iterations); } else ENOUGH = true; ENOUGH = ENOUGH || first >= iterations; /* id1++; */ /* printf("id1=%d dimen=%d first=%d\n", id1, dimensions, first); */ } params.store(n, STORQ, j, wptr[1]); return j; } /*********************************************************************** * * * lanczos_step() * * * ***********************************************************************/ /*********************************************************************** Description ----------- Function embodies a single Lanczos step Arguments --------- (input) n dimension of the eigenproblem for matrix B first start of index through loop last end of index through loop wptr array of pointers pointing to work space alf array to hold diagonal of the tridiagonal matrix T eta orthogonality estimate of Lanczos vectors at step j oldeta orthogonality estimate of Lanczos vectors at step j-1 bet array to hold off-diagonal of T ll number of intitial Lanczos vectors in local orthog. (has value of 0, 1 or 2) enough stop flag Functions used -------------- BLAS svd_ddot, svd_dscal, svd_daxpy, svd_datx, svd_dcopy USER store LAS purge, ortbnd, startv UTILITY svd_imin, svd_imax ***********************************************************************/ long lanczos_step(long first, long last, double *wptr[], double *alf, double *eta, double *oldeta, double *bet, long *ll, long *enough, double *rnmp, double *tolp, long n, SVDParams & params) { double t, *mid, rnm = *rnmp, tol = *tolp, anorm; long i, j; for (j=first; j<last; j++) { mid = wptr[2]; wptr[2] = wptr[1]; wptr[1] = mid; mid = wptr[3]; wptr[3] = wptr[4]; wptr[4] = mid; params.store(n, STORQ, j-1, wptr[2]); if (j-1 < MAXLL) params.store(n, STORP, j-1, wptr[4]); bet[j] = rnm; /* restart if invariant subspace is found */ if (!bet[j]) { rnm = startv(wptr, j, n, params); if (params.ierr) return j; if (!rnm) *enough = true; } if (*enough) { /* added by Doug... 
*/ /* These lines fix a bug that occurs with low-rank matrices */ mid = wptr[2]; wptr[2] = wptr[1]; wptr[1] = mid; /* ...added by Doug */ break; } /* take a lanczos step */ t = 1.0 / rnm; svd_datx(n, t, wptr[0], 1, wptr[1], 1); svd_dscal(n, t, wptr[3], 1); params.opb(wptr[3], wptr[0]); svd_daxpy(n, -rnm, wptr[2], 1, wptr[0], 1); alf[j] = svd_ddot(n, wptr[0], 1, wptr[3], 1); svd_daxpy(n, -alf[j], wptr[1], 1, wptr[0], 1); /* orthogonalize against initial lanczos vectors */ if (j <= MAXLL && (fabs(alf[j-1]) > 4.0 * fabs(alf[j]))) *ll = j; for (i=0; i < svd_imin(*ll, j-1); i++) { params.store(n, RETRP, i, wptr[5]); t = svd_ddot(n, wptr[5], 1, wptr[0], 1); params.store(n, RETRQ, i, wptr[5]); svd_daxpy(n, -t, wptr[5], 1, wptr[0], 1); eta[i] = params.eps1; oldeta[i] = params.eps1; } /* extended local reorthogonalization */ t = svd_ddot(n, wptr[0], 1, wptr[4], 1); svd_daxpy(n, -t, wptr[2], 1, wptr[0], 1); if (bet[j] > 0.0) bet[j] = bet[j] + t; t = svd_ddot(n, wptr[0], 1, wptr[3], 1); svd_daxpy(n, -t, wptr[1], 1, wptr[0], 1); alf[j] = alf[j] + t; svd_dcopy(n, wptr[0], 1, wptr[4], 1); rnm = sqrt(svd_ddot(n, wptr[0], 1, wptr[4], 1)); anorm = bet[j] + fabs(alf[j]) + rnm; tol = params.reps * anorm; /* update the orthogonality bounds */ ortbnd(alf, eta, oldeta, bet, j, rnm, params); /* restore the orthogonality state when needed */ purge(n, *ll, wptr[0], wptr[1], wptr[4], wptr[3], wptr[5], eta, oldeta, j, &rnm, tol, params); if (rnm <= tol) rnm = 0.0; } *rnmp = rnm; *tolp = tol; return j; } /*********************************************************************** * * * ortbnd() * * * ***********************************************************************/ /*********************************************************************** Description ----------- Funtion updates the eta recurrence Arguments --------- (input) alf array to hold diagonal of the tridiagonal matrix T eta orthogonality estimate of Lanczos vectors at step j oldeta orthogonality estimate of Lanczos vectors at step j-1 bet array to hold off-diagonal of T n dimension of the eigenproblem for matrix B j dimension of T rnm norm of the next residual vector eps1 roundoff estimate for dot product of two unit vectors (output) eta orthogonality estimate of Lanczos vectors at step j+1 oldeta orthogonality estimate of Lanczos vectors at step j Functions used -------------- BLAS svd_dswap ***********************************************************************/ void ortbnd(double *alf, double *eta, double *oldeta, double *bet, long step, double rnm, SVDParams & params) { long i; if (step < 1) return; if (rnm) { if (step > 1) { oldeta[0] = (bet[1] * eta[1] + (alf[0]-alf[step]) * eta[0] - bet[step] * oldeta[0]) / rnm + params.eps1; } for (i=1; i<=step-2; i++) oldeta[i] = (bet[i+1] * eta[i+1] + (alf[i]-alf[step]) * eta[i] + bet[i] * eta[i-1] - bet[step] * oldeta[i])/rnm + params.eps1; } oldeta[step-1] = params.eps1; svd_dswap(step, oldeta, 1, eta, 1); eta[step] = params.eps1; return; } /*********************************************************************** * * * purge() * * * ***********************************************************************/ /*********************************************************************** Description ----------- Function examines the state of orthogonality between the new Lanczos vector and the previous ones to decide whether re-orthogonalization should be performed Arguments --------- (input) n dimension of the eigenproblem for matrix B ll number of intitial Lanczos vectors in local orthog. 
r residual vector to become next Lanczos vector q current Lanczos vector ra previous Lanczos vector qa previous Lanczos vector wrk temporary vector to hold the previous Lanczos vector eta state of orthogonality between r and prev. Lanczos vectors oldeta state of orthogonality between q and prev. Lanczos vectors j current Lanczos step (output) r residual vector orthogonalized against previous Lanczos vectors q current Lanczos vector orthogonalized against previous ones Functions used -------------- BLAS svd_daxpy, svd_dcopy, svd_idamax, svd_ddot USER store ***********************************************************************/ void purge(long n, long ll, double *r, double *q, double *ra, double *qa, double *wrk, double *eta, double *oldeta, long step, double *rnmp, double tol, SVDParams & params) { double t, tq, tr, reps1, rnm = *rnmp; long k, iteration, flag, i; if (step < ll+2) return; k = svd_idamax(step - (ll+1), &eta[ll], 1) + ll; if (fabs(eta[k]) > params.reps) { reps1 = params.eps1 / params.reps; iteration = 0; flag = true; while (iteration < 2 && flag) { if (rnm > tol) { /* bring in a lanczos vector t and orthogonalize both * r and q against it */ tq = 0.0; tr = 0.0; for (i = ll; i < step; i++) { params.store(n, RETRQ, i, wrk); t = -svd_ddot(n, qa, 1, wrk, 1); tq += fabs(t); svd_daxpy(n, t, wrk, 1, q, 1); t = -svd_ddot(n, ra, 1, wrk, 1); tr += fabs(t); svd_daxpy(n, t, wrk, 1, r, 1); } svd_dcopy(n, q, 1, qa, 1); t = -svd_ddot(n, r, 1, qa, 1); tr += fabs(t); svd_daxpy(n, t, q, 1, r, 1); svd_dcopy(n, r, 1, ra, 1); rnm = sqrt(svd_ddot(n, ra, 1, r, 1)); if (tq <= reps1 && tr <= reps1 * rnm) flag = false; } iteration++; } for (i = ll; i <= step; i++) { eta[i] = params.eps1; oldeta[i] = params.eps1; } } *rnmp = rnm; return; } /*********************************************************************** * * * stpone() * * * ***********************************************************************/ /*********************************************************************** Description ----------- Function performs the first step of the Lanczos algorithm. It also does a step of extended local re-orthogonalization. 
Arguments --------- (input) n dimension of the eigenproblem for matrix B (output) ierr error flag wptr array of pointers that point to work space that contains wptr[0] r[j] wptr[1] q[j] wptr[2] q[j-1] wptr[3] p wptr[4] p[j-1] wptr[6] diagonal elements of matrix T Functions used -------------- BLAS svd_daxpy, svd_datx, svd_dcopy, svd_ddot, svd_dscal USER store, opb LAS startv ***********************************************************************/ void stpone(double *wrkptr[], double *rnmp, double *tolp, long n, SVDParams & params) { double t, *alf, rnm, anorm; alf = wrkptr[6]; /* get initial vector; default is random */ rnm = startv(wrkptr, 0, n, params); if (rnm == 0.0 || params.ierr != 0) return; /* normalize starting vector */ t = 1.0 / rnm; svd_datx(n, t, wrkptr[0], 1, wrkptr[1], 1); svd_dscal(n, t, wrkptr[3], 1); /* take the first step */ params.opb(wrkptr[3], wrkptr[0]); alf[0] = svd_ddot(n, wrkptr[0], 1, wrkptr[3], 1); svd_daxpy(n, -alf[0], wrkptr[1], 1, wrkptr[0], 1); t = svd_ddot(n, wrkptr[0], 1, wrkptr[3], 1); svd_daxpy(n, -t, wrkptr[1], 1, wrkptr[0], 1); alf[0] += t; svd_dcopy(n, wrkptr[0], 1, wrkptr[4], 1); rnm = sqrt(svd_ddot(n, wrkptr[0], 1, wrkptr[4], 1)); anorm = rnm + fabs(alf[0]); *rnmp = rnm; *tolp = params.reps * anorm; return; } /*********************************************************************** * * * startv() * * * ***********************************************************************/ /*********************************************************************** Description ----------- Function delivers a starting vector in r and returns |r|; it returns zero if the range is spanned, and ierr is non-zero if no starting vector within range of operator can be found. Parameters --------- (input) n dimension of the eigenproblem matrix B wptr array of pointers that point to work space j starting index for a Lanczos run eps machine epsilon (relative precision) (output) wptr array of pointers that point to work space that contains r[j], q[j], q[j-1], p[j], p[j-1] ierr error flag (nonzero if no starting vector can be found) Functions used -------------- BLAS svd_ddot, svd_dcopy, svd_daxpy USER svd_opb, store MISC random ***********************************************************************/ double startv(double *wptr[], long step, long n, SVDParams & params) { double rnm2, *r, t; long irand; long id, i; /* get initial vector; default is random */ rnm2 = svd_ddot(n, wptr[0], 1, wptr[0], 1); irand = 918273 + step; r = wptr[0]; for (id = 0; id < 3; id++) { if (id > 0 || step > 0 || rnm2 == 0) for (i = 0; i < n; i++) r[i] = svd_random2(&irand); svd_dcopy(n, wptr[0], 1, wptr[3], 1); /* apply operator to put r in range (essential if m singular) */ params.opb(wptr[3], wptr[0]); svd_dcopy(n, wptr[0], 1, wptr[3], 1); rnm2 = svd_ddot(n, wptr[0], 1, wptr[3], 1); if (rnm2 > 0.0) break; } /* fatal error */ if (rnm2 <= 0.0) { params.ierr = 8192; return(-1); } if (step > 0) { for (i = 0; i < step; i++) { params.store(n, RETRQ, i, wptr[5]); t = -svd_ddot(n, wptr[3], 1, wptr[5], 1); svd_daxpy(n, t, wptr[5], 1, wptr[0], 1); } /* make sure q[step] is orthogonal to q[step-1] */ t = svd_ddot(n, wptr[4], 1, wptr[0], 1); svd_daxpy(n, -t, wptr[2], 1, wptr[0], 1); svd_dcopy(n, wptr[0], 1, wptr[3], 1); t = svd_ddot(n, wptr[3], 1, wptr[0], 1); if (t <= eps * rnm2) t = 0.0; rnm2 = t; } return(sqrt(rnm2)); } /*********************************************************************** * * * error_bound() * * * ***********************************************************************/ 
/*********************************************************************** Description ----------- Function massages error bounds for very close ritz values by placing a gap between them. The error bounds are then refined to reflect this. Arguments --------- (input) endl left end of interval containing unwanted eigenvalues endr right end of interval containing unwanted eigenvalues ritz array to store the ritz values bnd array to store the error bounds enough stop flag Functions used -------------- BLAS svd_idamax UTILITY svd_dmin ***********************************************************************/ long error_bound(long *enough, double endl, double endr, double *ritz, double *bnd, long step, double tol, SVDParams & params) { long mid, i, neig; double gapl, gap; /* massage error bounds for very close ritz values */ mid = svd_idamax(step + 1, bnd, 1); for (i=((step+1) + (step-1)) / 2; i >= mid + 1; i -= 1) if (fabs(ritz[i-1] - ritz[i]) < params.eps34 * fabs(ritz[i])) if (bnd[i] > tol && bnd[i-1] > tol) { bnd[i-1] = sqrt(bnd[i] * bnd[i] + bnd[i-1] * bnd[i-1]); bnd[i] = 0.0; } for (i=((step+1) - (step-1)) / 2; i <= mid - 1; i +=1 ) if (fabs(ritz[i+1] - ritz[i]) < params.eps34 * fabs(ritz[i])) if (bnd[i] > tol && bnd[i+1] > tol) { bnd[i+1] = sqrt(bnd[i] * bnd[i] + bnd[i+1] * bnd[i+1]); bnd[i] = 0.0; } /* refine the error bounds */ neig = 0; gapl = ritz[step] - ritz[0]; for (i = 0; i <= step; i++) { gap = gapl; if (i < step) gapl = ritz[i+1] - ritz[i]; gap = svd_dmin(gap, gapl); if (gap > bnd[i]) bnd[i] = bnd[i] * (bnd[i] / gap); if (bnd[i] <= 16.0 * eps * fabs(ritz[i])) { neig++; if (!*enough) *enough = endl < ritz[i] && ritz[i] < endr; } } return neig; } /*********************************************************************** * * * imtqlb() * * * ***********************************************************************/ /*********************************************************************** Description ----------- imtqlb() is a translation of a Fortran version of the Algol procedure IMTQL1, Num. Math. 12, 377-383(1968) by Martin and Wilkinson, as modified in Num. Math. 15, 450(1970) by Dubrulle. Handbook for Auto. Comp., vol.II-Linear Algebra, 241-248(1971). See also <NAME> al, Eispack Guide, Lecture Notes in Computer Science, Springer-Verlag, (1976). The function finds the eigenvalues of a symmetric tridiagonal matrix by the implicit QL method. Arguments --------- (input) n order of the symmetric tridiagonal matrix d contains the diagonal elements of the input matrix e contains the subdiagonal elements of the input matrix in its last n-1 positions. e[0] is arbitrary (output) d contains the eigenvalues in ascending order. if an error exit is made, the eigenvalues are correct and ordered for indices 0,1,...ierr, but may not be the smallest eigenvalues. e has been destroyed. ierr set to zero for normal return, j if the j-th eigenvalue has not been determined after 30 iterations. 
Functions used -------------- UTILITY svd_fsign MISC svd_pythag ***********************************************************************/ void imtqlb(long n, double d[], double e[], double bnd[], SVDParams & params) { long last, l, m, i, iteration; /* various flags */ long exchange, convergence, underflow; double b, test, g, r, s, c, p, f; if (n == 1) return; params.ierr = 0; bnd[0] = 1.0; last = n - 1; for (i = 1; i < n; i++) { bnd[i] = 0.0; e[i-1] = e[i]; } e[last] = 0.0; for (l = 0; l < n; l++) { iteration = 0; while (iteration <= 30) { for (m = l; m < n; m++) { convergence = false; if (m == last) break; else { test = fabs(d[m]) + fabs(d[m+1]); if (test + fabs(e[m]) == test) convergence = true; } if (convergence) break; } p = d[l]; f = bnd[l]; if (m != l) { if (iteration == 30) { params.ierr = l; return; } iteration += 1; /*........ form shift ........*/ g = (d[l+1] - p) / (2.0 * e[l]); if (!isfinite(g)) throw std::runtime_error("NaN or infinite g value; probably you " "have asked for more singular values than " "the effective rank of the matrix"); //cerr << "l = " << l << " n = " << n << " iteration = " // << iteration // << " g = " << g << " d[l + 1] = " << d[l + 1] // << " p = " << p << " e[l] = " << e[l] << endl; r = svd_pythag(g, 1.0); g = d[m] - p + e[l] / (g + svd_fsign(r, g)); s = 1.0; c = 1.0; p = 0.0; underflow = false; i = m - 1; while (underflow == false && i >= l) { f = s * e[i]; b = c * e[i]; r = svd_pythag(f, g); e[i+1] = r; if (r == 0.0) underflow = true; else { s = f / r; c = g / r; g = d[i+1] - p; r = (d[i] - g) * s + 2.0 * c * b; p = s * r; d[i+1] = g + p; g = c * r - b; f = bnd[i+1]; bnd[i+1] = s * bnd[i] + c * f; bnd[i] = c * bnd[i] - s * f; i--; } } /* end while (underflow != false && i >= l) */ /*........ recover from underflow .........*/ if (underflow) { d[i+1] -= p; e[m] = 0.0; } else { d[l] -= p; e[l] = g; e[m] = 0.0; } } /* end if (m != l) */ else { /* order the eigenvalues */ exchange = true; if (l != 0) { i = l; while (i >= 1 && exchange == true) { if (p < d[i-1]) { d[i] = d[i-1]; bnd[i] = bnd[i-1]; i--; } else exchange = false; } } if (exchange) i = 0; d[i] = p; bnd[i] = f; iteration = 31; } } /* end while (iteration <= 30) */ } /* end for (l=0; l<n; l++) */ return; } /* end main */ /*********************************************************************** * * * imtql2() * * * ***********************************************************************/ /*********************************************************************** Description ----------- imtql2() is a translation of a Fortran version of the Algol procedure IMTQL2, Num. Math. 12, 377-383(1968) by Martin and Wilkinson, as modified in Num. Math. 15, 450(1970) by Dubrulle. Handbook for Auto. Comp., vol.II-Linear Algebra, 241-248(1971). See also <NAME> et al, Eispack Guide, Lecture Notes in Computer Science, Springer-Verlag, (1976). This function finds the eigenvalues and eigenvectors of a symmetric tridiagonal matrix by the implicit QL method. Arguments --------- (input) nm row dimension of the symmetric tridiagonal matrix n order of the matrix d contains the diagonal elements of the input matrix e contains the subdiagonal elements of the input matrix in its last n-1 positions. e[0] is arbitrary z contains the identity matrix (output) d contains the eigenvalues in ascending order. if an error exit is made, the eigenvalues are correct but unordered for for indices 0,1,...,ierr. e has been destroyed. z contains orthonormal eigenvectors of the symmetric tridiagonal (or full) matrix. 
if an error exit is made, z contains the eigenvectors associated with the stored eigenvalues. ierr set to zero for normal return, j if the j-th eigenvalue has not been determined after 30 iterations. Functions used -------------- UTILITY svd_fsign MISC svd_pythag ***********************************************************************/ void imtql2(long nm, long n, double d[], double e[], double z[], SVDParams & params) { long index, nnm, j, last, l, m, i, k, iteration, convergence, underflow; double b, test, g, r, s, c, p, f; if (n == 1) return; params.ierr = 0; last = n - 1; for (i = 1; i < n; i++) e[i-1] = e[i]; e[last] = 0.0; nnm = n * nm; for (l = 0; l < n; l++) { iteration = 0; /* look for small sub-diagonal element */ while (iteration <= 30) { for (m = l; m < n; m++) { convergence = false; if (m == last) break; else { test = fabs(d[m]) + fabs(d[m+1]); if (test + fabs(e[m]) == test) convergence = true; } if (convergence) break; } if (m != l) { /* set error -- no convergence to an eigenvalue after * 30 iterations. */ if (iteration == 30) { params.ierr = l; return; } p = d[l]; iteration += 1; /* form shift */ g = (d[l+1] - p) / (2.0 * e[l]); r = svd_pythag(g, 1.0); g = d[m] - p + e[l] / (g + svd_fsign(r, g)); if (!isfinite(g)) throw std::runtime_error("NaN or infinite g value; probably you " "have asked for more singular values than " "the effective rank of the matrix"); s = 1.0; c = 1.0; p = 0.0; underflow = false; i = m - 1; while (underflow == false && i >= l) { f = s * e[i]; b = c * e[i]; r = svd_pythag(f, g); e[i+1] = r; if (r == 0.0) underflow = true; else { s = f / r; c = g / r; g = d[i+1] - p; r = (d[i] - g) * s + 2.0 * c * b; p = s * r; d[i+1] = g + p; g = c * r - b; /* form vector */ for (k = 0; k < nnm; k += n) { index = k + i; f = z[index+1]; z[index+1] = s * z[index] + c * f; z[index] = c * z[index] - s * f; } i--; } } /* end while (underflow != false && i >= l) */ /*........ recover from underflow .........*/ if (underflow) { d[i+1] -= p; e[m] = 0.0; } else { d[l] -= p; e[l] = g; e[m] = 0.0; } } else break; } /*...... end while (iteration <= 30) .........*/ } /*...... end for (l=0; l<n; l++) .............*/ /* order the eigenvalues */ for (l = 1; l < n; l++) { i = l - 1; k = i; p = d[i]; for (j = l; j < n; j++) { if (d[j] < p) { k = j; p = d[j]; } } /* ...and corresponding eigenvectors */ if (k != i) { d[k] = d[i]; d[i] = p; for (j = 0; j < nnm; j += n) { p = z[j+i]; z[j+i] = z[j+k]; z[j+k] = p; } } } return; } /*...... end main ............................*/ /*********************************************************************** * * * machar() * * * ***********************************************************************/ /*********************************************************************** Description ----------- This function is a partial translation of a Fortran-77 subroutine written by <NAME> of Argonne National Laboratory. It dynamically determines the listed machine parameters of the floating-point arithmetic. According to the documentation of the Fortran code, "the determination of the first three uses an extension of an algorithm due to M. Malcolm, ACM 15 (1972), pp. 949-951, incorporating some, but not all, of the improvements suggested by <NAME> and <NAME>, CACM 17 (1974), pp. 276-277." The complete Fortran version of this translation is documented in <NAME>, "Machar: a Subroutine to Dynamically Determine Determine Machine Parameters," TOMS 14, December, 1988. 
Parameters reported ------------------- ibeta the radix for the floating-point representation it the number of base ibeta digits in the floating-point significand irnd 0 if floating-point addition chops 1 if floating-point addition rounds, but not in the ieee style 2 if floating-point addition rounds in the ieee style 3 if floating-point addition chops, and there is partial underflow 4 if floating-point addition rounds, but not in the ieee style, and there is partial underflow 5 if floating-point addition rounds in the ieee style, and there is partial underflow machep the largest negative integer such that 1.0+float(ibeta)**machep .ne. 1.0, except that machep is bounded below by -(it+3) negeps the largest negative integer such that 1.0-float(ibeta)**negeps .ne. 1.0, except that negeps is bounded below by -(it+3) ***********************************************************************/ void machar(long *ibeta, long *it, long *irnd, long *machep, long *negep, double *eps) { volatile double beta, betain, betah, a, b, ZERO, ONE, TWO, temp, tempa, temp1; long i, itemp; ONE = (double) 1; TWO = ONE + ONE; ZERO = ONE - ONE; a = ONE; temp1 = ONE; while (temp1 - ONE == ZERO) { a = a + a; temp = a + ONE; temp1 = temp - a; } b = ONE; itemp = 0; while (itemp == 0) { b = b + b; temp = a + b; itemp = (long)(temp - a); } *ibeta = itemp; beta = (double) *ibeta; *it = 0; b = ONE; temp1 = ONE; while (temp1 - ONE == ZERO) { *it = *it + 1; b = b * beta; temp = b + ONE; temp1 = temp - b; } *irnd = 0; betah = beta / TWO; temp = a + betah; if (temp - a != ZERO) *irnd = 1; tempa = a + beta; temp = tempa + betah; if ((*irnd == 0) && (temp - tempa != ZERO)) *irnd = 2; *negep = *it + 3; betain = ONE / beta; a = ONE; for (i = 0; i < *negep; i++) a = a * betain; b = a; temp = ONE - a; while (temp-ONE == ZERO) { a = a * beta; *negep = *negep - 1; temp = ONE - a; } *negep = -(*negep); *machep = -(*it) - 3; a = b; temp = ONE + a; while (temp - ONE == ZERO) { a = a * beta; *machep = *machep + 1; temp = ONE + a; } *eps = a; return; }
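The machar() routine above probes the radix, mantissa length and rounding behaviour of the floating-point unit at run time. As a minimal stand-alone sketch (not part of the original LAS2/SVDLIBC source; it assumes IEEE-754 doubles, round-to-nearest, and no extended-precision intermediates), the epsilon-detection idea it implements can be checked against the compile-time constant from <float.h>:

#include <float.h>
#include <stdio.h>

/* Halve eps until adding half of it to 1.0 no longer changes 1.0; the last
 * distinguishable value is comparable to what machar() reports via *eps. */
static double detect_eps(void) {
  volatile double one = 1.0;   /* volatile to discourage constant folding */
  double eps = 1.0;
  while (one + eps / 2.0 != one)
    eps /= 2.0;
  return eps;
}

int main(void) {
  printf("detected eps = %g, DBL_EPSILON = %g\n", detect_eps(), DBL_EPSILON);
  return 0;  /* the two values should agree on conforming IEEE-754 platforms */
}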
25,899
448
<reponame>Liorba/iceberg /* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.orc; import com.google.common.base.Preconditions; import com.netflix.iceberg.Schema; import com.netflix.iceberg.hadoop.HadoopInputFile; import com.netflix.iceberg.hadoop.HadoopOutputFile; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.io.OutputFile; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.orc.OrcFile; import org.apache.orc.Reader; import org.apache.orc.TypeDescription; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; public class ORC { private ORC() { } public static WriteBuilder write(OutputFile file) { return new WriteBuilder(file); } public static class WriteBuilder { private final OutputFile file; private final Configuration conf; private Schema schema = null; private Map<String, byte[]> metadata = new HashMap<>(); private WriteBuilder(OutputFile file) { this.file = file; if (file instanceof HadoopOutputFile) { conf = new Configuration(((HadoopOutputFile) file).getConf()); } else { conf = new Configuration(); } } public WriteBuilder metadata(String property, String value) { metadata.put(property, value.getBytes(StandardCharsets.UTF_8)); return this; } public WriteBuilder config(String property, String value) { conf.set(property, value); return this; } public WriteBuilder schema(Schema schema) { this.schema = schema; return this; } public OrcFileAppender build() { OrcFile.WriterOptions options = OrcFile.writerOptions(conf); return new OrcFileAppender(schema, file, options, metadata); } } public static ReadBuilder read(InputFile file) { return new ReadBuilder(file); } public static class ReadBuilder { private final InputFile file; private final Configuration conf; private com.netflix.iceberg.Schema schema = null; private Long start = null; private Long length = null; private ReadBuilder(InputFile file) { Preconditions.checkNotNull(file, "Input file cannot be null"); this.file = file; if (file instanceof HadoopInputFile) { conf = new Configuration(((HadoopInputFile) file).getConf()); } else { conf = new Configuration(); } } /** * Restricts the read to the given range: [start, start + length). 
* * @param start the start position for this read * @param length the length of the range this read should scan * @return this builder for method chaining */ public ReadBuilder split(long start, long length) { this.start = start; this.length = length; return this; } public ReadBuilder schema(com.netflix.iceberg.Schema schema) { this.schema = schema; return this; } public ReadBuilder config(String property, String value) { conf.set(property, value); return this; } public OrcIterator build() { Preconditions.checkNotNull(schema, "Schema is required"); try { Path path = new Path(file.location()); Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf)); ColumnIdMap columnIds = new ColumnIdMap(); TypeDescription orcSchema = TypeConversion.toOrc(schema, columnIds); Reader.Options options = reader.options(); if (start != null) { options.range(start, length); } options.schema(orcSchema); return new OrcIterator(path, orcSchema, reader.rows(options)); } catch (IOException e) { throw new RuntimeException("Can't open " + file.location(), e); } } } }
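For orientation only, a hedged caller-side sketch of the read builder defined above; the helper method and the names `inputFile`, `icebergSchema` and `splitLength` are assumptions supplied here for illustration, not part of the original file:

// Hypothetical helper showing how the builder chain is intended to be driven.
static OrcIterator openRows(InputFile inputFile, Schema icebergSchema, long splitLength) {
  return ORC.read(inputFile)        // e.g. a HadoopInputFile or any other InputFile
      .schema(icebergSchema)        // required: build() enforces this via Preconditions.checkNotNull
      .split(0, splitLength)        // optional: restrict the scan to the byte range [0, splitLength)
      .build();
}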
1,555
14,668
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <algorithm> #include "base/bind.h" #include "base/callback.h" #include "base/memory/raw_ptr.h" #include "components/cbor/values.h" #include "device/fido/cbor_extract.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" namespace device { namespace { using cbor_extract::IntKey; using cbor_extract::Is; using cbor_extract::Map; using cbor_extract::Stop; using cbor_extract::StringKey; template <typename T> bool VectorSpanEqual(const std::vector<T>& v, base::span<const T> s) { if (v.size() != s.size()) { return false; } return std::equal(v.begin(), v.end(), s.begin()); } struct MakeCredRequest { raw_ptr<const std::vector<uint8_t>> client_data_hash; raw_ptr<const std::string> rp_id; raw_ptr<const std::vector<uint8_t>> user_id; raw_ptr<const std::vector<cbor::Value>> cred_params; raw_ptr<const std::vector<cbor::Value>> excluded_credentials; raw_ptr<const bool> resident_key; raw_ptr<const bool> user_verification; raw_ptr<const bool> large_test; raw_ptr<const bool> negative_test; }; TEST(CBORExtract, Basic) { cbor::Value::MapValue rp; rp.emplace("id", "example.com"); rp.emplace("name", "Example"); static const uint8_t kUserId[] = {1, 2, 3, 4}; cbor::Value::MapValue user; user.emplace("id", base::span<const uint8_t>(kUserId)); user.emplace("name", "Joe"); std::vector<cbor::Value> cred_params; static const int64_t kAlgs[] = {-7, -257}; for (const int64_t alg : kAlgs) { cbor::Value::MapValue cred_param; cred_param.emplace("type", "public-key"); cred_param.emplace("alg", alg); cred_params.emplace_back(std::move(cred_param)); } std::vector<cbor::Value> excluded_creds; for (int i = 0; i < 3; i++) { cbor::Value::MapValue excluded_cred; uint8_t id[1] = {static_cast<uint8_t>(i)}; excluded_cred.emplace("type", "public-key"); excluded_cred.emplace("id", base::span<const uint8_t>(id)); excluded_creds.emplace_back(std::move(excluded_cred)); } cbor::Value::MapValue options; options.emplace("rk", true); static const uint8_t kClientDataHash[32] = {4, 3, 2, 1, 0}; cbor::Value::MapValue make_cred; make_cred.emplace(1, base::span<const uint8_t>(kClientDataHash)); make_cred.emplace(2, std::move(rp)); make_cred.emplace(3, std::move(user)); make_cred.emplace(4, std::move(cred_params)); make_cred.emplace(5, std::move(excluded_creds)); make_cred.emplace(7, std::move(options)); make_cred.emplace(100, false); make_cred.emplace(-3, true); static constexpr cbor_extract::StepOrByte<MakeCredRequest> kMakeCredParseSteps[] = { // clang-format off ELEMENT(Is::kRequired, MakeCredRequest, client_data_hash), IntKey<MakeCredRequest>(1), Map<MakeCredRequest>(), IntKey<MakeCredRequest>(2), ELEMENT(Is::kRequired, MakeCredRequest, rp_id), StringKey<MakeCredRequest>(), 'i', 'd', '\0', Stop<MakeCredRequest>(), Map<MakeCredRequest>(), IntKey<MakeCredRequest>(3), ELEMENT(Is::kRequired, MakeCredRequest, user_id), StringKey<MakeCredRequest>(), 'i', 'd', '\0', Stop<MakeCredRequest>(), ELEMENT(Is::kRequired, MakeCredRequest, cred_params), IntKey<MakeCredRequest>(4), ELEMENT(Is::kRequired, MakeCredRequest, excluded_credentials), IntKey<MakeCredRequest>(5), Map<MakeCredRequest>(), IntKey<MakeCredRequest>(7), ELEMENT(Is::kOptional, MakeCredRequest, resident_key), StringKey<MakeCredRequest>(), 'r', 'k', '\0', ELEMENT(Is::kOptional, MakeCredRequest, user_verification), StringKey<MakeCredRequest>(), 'u', 'v', '\0', 
Stop<MakeCredRequest>(), ELEMENT(Is::kRequired, MakeCredRequest, large_test), IntKey<MakeCredRequest>(100), ELEMENT(Is::kRequired, MakeCredRequest, negative_test), IntKey<MakeCredRequest>(-3), Stop<MakeCredRequest>(), // clang-format on }; MakeCredRequest make_cred_request; ASSERT_TRUE(cbor_extract::Extract<MakeCredRequest>( &make_cred_request, kMakeCredParseSteps, make_cred)); EXPECT_TRUE(VectorSpanEqual<uint8_t>(*make_cred_request.client_data_hash, kClientDataHash)); EXPECT_EQ(*make_cred_request.rp_id, "example.com"); EXPECT_TRUE(VectorSpanEqual<uint8_t>(*make_cred_request.user_id, kUserId)); EXPECT_EQ(make_cred_request.cred_params->size(), 2u); EXPECT_EQ(make_cred_request.excluded_credentials->size(), 3u); EXPECT_TRUE(*make_cred_request.resident_key); EXPECT_TRUE(make_cred_request.user_verification == nullptr); EXPECT_FALSE(*make_cred_request.large_test); EXPECT_TRUE(*make_cred_request.negative_test); std::vector<int64_t> algs; EXPECT_TRUE(cbor_extract::ForEachPublicKeyEntry( *make_cred_request.cred_params, cbor::Value("alg"), base::BindRepeating( [](std::vector<int64_t>* out, const cbor::Value& value) -> bool { if (!value.is_integer()) { return false; } out->push_back(value.GetInteger()); return true; }, base::Unretained(&algs)))); EXPECT_TRUE(VectorSpanEqual<int64_t>(algs, kAlgs)); } TEST(CBORExtract, MissingRequired) { struct Dummy { raw_ptr<const int64_t> value; }; static constexpr cbor_extract::StepOrByte<Dummy> kSteps[] = { ELEMENT(Is::kRequired, Dummy, value), IntKey<Dummy>(1), Stop<Dummy>(), }; cbor::Value::MapValue map; Dummy dummy; EXPECT_FALSE(cbor_extract::Extract<Dummy>(&dummy, kSteps, map)); } TEST(CBORExtract, WrongType) { struct Dummy { raw_ptr<const int64_t> value; }; static constexpr cbor_extract::StepOrByte<Dummy> kSteps[] = { ELEMENT(Is::kRequired, Dummy, value), IntKey<Dummy>(1), Stop<Dummy>(), }; cbor::Value::MapValue map; map.emplace(1, "string"); Dummy dummy; EXPECT_FALSE(cbor_extract::Extract<Dummy>(&dummy, kSteps, map)); } } // namespace } // namespace device
2,637
12,278
<filename>3rdParty/boost/1.71.0/libs/math/test/test_autodiff_5.cpp<gh_stars>1000+ // Copyright <NAME> 2018 - 2019. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // https://www.boost.org/LICENSE_1_0.txt) #include "test_autodiff.hpp" BOOST_AUTO_TEST_SUITE(test_autodiff_5) BOOST_AUTO_TEST_CASE_TEMPLATE(binomial_hpp, T, all_float_types) { using boost::multiprecision::min; using std::fabs; using std::min; using test_constants = test_constants_t<T>; static constexpr auto m = test_constants::order; test_detail::RandomSample<unsigned> n_sampler{0u, 30u}; test_detail::RandomSample<unsigned> r_sampler{0u, 30u}; for (auto i : boost::irange(test_constants::n_samples)) { std::ignore = i; auto n = n_sampler.next(); auto r = n == 0 ? 0 : (min)(r_sampler.next(), n - 1); // This is a hard function to test for type float due to a specialization of // boost::math::binomial_coefficient auto autodiff_v = std::is_same<T, float>::value ? make_fvar<T, m>(boost::math::binomial_coefficient<T>(n, r)) : boost::math::binomial_coefficient<T>(n, r); auto anchor_v = boost::math::binomial_coefficient<T>(n, r); BOOST_CHECK_EQUAL(autodiff_v.derivative(0u), anchor_v); } } BOOST_AUTO_TEST_CASE_TEMPLATE(cbrt_hpp, T, all_float_types) { using test_constants = test_constants_t<T>; static constexpr auto m = test_constants::order; test_detail::RandomSample<T> x_sampler{-2000, 2000}; for (auto i : boost::irange(test_constants::n_samples)) { std::ignore = i; auto x = x_sampler.next(); BOOST_CHECK_CLOSE(boost::math::cbrt(make_fvar<T, m>(x)).derivative(0u), boost::math::cbrt(x), 50 * test_constants::pct_epsilon()); } } BOOST_AUTO_TEST_CASE_TEMPLATE(chebyshev_hpp, T, all_float_types) { using test_constants = test_constants_t<T>; static constexpr auto m = test_constants::order; { test_detail::RandomSample<unsigned> n_sampler{0u, 10u}; test_detail::RandomSample<T> x_sampler{-2, 2}; for (auto i : boost::irange(test_constants::n_samples)) { std::ignore = i; auto n = n_sampler.next(); auto x = x_sampler.next(); BOOST_CHECK_CLOSE( boost::math::chebyshev_t(n, make_fvar<T, m>(x)).derivative(0u), boost::math::chebyshev_t(n, x), 40 * test_constants::pct_epsilon()); BOOST_CHECK_CLOSE( boost::math::chebyshev_u(n, make_fvar<T, m>(x)).derivative(0u), boost::math::chebyshev_u(n, x), 40 * test_constants::pct_epsilon()); BOOST_CHECK_CLOSE( boost::math::chebyshev_t_prime(n, make_fvar<T, m>(x)).derivative(0u), boost::math::chebyshev_t_prime(n, x), 40 * test_constants::pct_epsilon()); /*/usr/include/boost/math/special_functions/chebyshev.hpp:164:40: error: cannot convert boost::math::differentiation::autodiff_v1::detail::fvar<double, 3> to double in return BOOST_CHECK_EQUAL(boost::math::chebyshev_clenshaw_recurrence(c.data(),c.size(),make_fvar<T,m>(0.20)) , boost::math::chebyshev_clenshaw_recurrence(c.data(),c.size(),static_cast<T>(0.20)));*/ /*try { std::array<T, 4> c0{{14.2, -13.7, 82.3, 96}}; BOOST_CHECK_CLOSE(boost::math::chebyshev_clenshaw_recurrence(c0.data(), c0.size(), make_fvar<T,m>(x)), boost::math::chebyshev_clenshaw_recurrence(c0.data(), c0.size(), x), 10*test_constants::pct_epsilon()); } catch (...) 
{ std::rethrow_exception(std::exception_ptr(std::current_exception())); }*/ } } } BOOST_AUTO_TEST_CASE_TEMPLATE(cospi_hpp, T, all_float_types) { using test_constants = test_constants_t<T>; static constexpr auto m = test_constants::order; test_detail::RandomSample<T> x_sampler{-2000, 2000}; for (auto i : boost::irange(test_constants::n_samples)) { std::ignore = i; auto x = x_sampler.next(); BOOST_CHECK_CLOSE(boost::math::cos_pi(make_fvar<T, m>(x)).derivative(0u), boost::math::cos_pi(x), test_constants::pct_epsilon()); } } BOOST_AUTO_TEST_CASE_TEMPLATE(digamma_hpp, T, all_float_types) { using boost::math::nextafter; using std::nextafter; using test_constants = test_constants_t<T>; static constexpr auto m = test_constants::order; test_detail::RandomSample<T> x_sampler{-1, 2000}; for (auto i : boost::irange(test_constants::n_samples)) { std::ignore = i; auto x = nextafter(x_sampler.next(), ((std::numeric_limits<T>::max))()); auto autodiff_v = boost::math::digamma(make_fvar<T, m>(x)); auto anchor_v = boost::math::digamma(x); BOOST_CHECK_CLOSE(autodiff_v.derivative(0u), anchor_v, 1e4 * test_constants::pct_epsilon()); } } BOOST_AUTO_TEST_SUITE_END()
2,246
310
# Find the frequency of all-number hashids within 1_000_000 encodes for default settings

from multiprocessing import Pool, cpu_count

import hashids
import hashlib

procs = cpu_count()

num_tests = 1_000
num_iterations = 1_000_000


def find_frequency(i):
    salt = hashlib.md5(bytes(i)).hexdigest()
    print("Finding frequency for salt {}".format(salt))
    h = hashids.Hashids(salt=salt, min_length=7)
    count = 0
    for i in range(1, num_iterations):
        a = h.encode(i)
        try:
            b = int(a)
            count += 1
        except ValueError:
            pass
    print("Frequency for salt {}: {}".format(salt, count))
    return count


with Pool(procs) as p:
    frequencies = p.map(find_frequency, range(0, num_tests))

# print("Frequencies: {}".format(frequencies))
print("Average frequency: {:2f}".format(sum(frequencies) / len(frequencies)))
357
4,812
<gh_stars>1000+ //===-- YAMLRemarkParser.h - Parser for YAML remarks ------------*- C++/-*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file provides the impementation of the YAML remark parser. // //===----------------------------------------------------------------------===// #ifndef LLVM_REMARKS_YAML_REMARK_PARSER_H #define LLVM_REMARKS_YAML_REMARK_PARSER_H #include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Remarks/Remark.h" #include "llvm/Remarks/RemarkParser.h" #include "llvm/Support/Error.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/YAMLParser.h" #include "llvm/Support/YAMLTraits.h" #include "llvm/Support/raw_ostream.h" #include <string> namespace llvm { namespace remarks { class YAMLParseError : public ErrorInfo<YAMLParseError> { public: static char ID; YAMLParseError(StringRef Message, SourceMgr &SM, yaml::Stream &Stream, yaml::Node &Node); YAMLParseError(StringRef Message) : Message(Message) {} void log(raw_ostream &OS) const override { OS << Message; } std::error_code convertToErrorCode() const override { return inconvertibleErrorCode(); } private: std::string Message; }; /// Regular YAML to Remark parser. struct YAMLRemarkParser : public RemarkParser { /// The string table used for parsing strings. Optional<ParsedStringTable> StrTab; /// Last error message that can come from the YAML parser diagnostics. /// We need this for catching errors in the constructor. std::string LastErrorMessage; /// Source manager for better error messages. SourceMgr SM; /// Stream for yaml parsing. yaml::Stream Stream; /// Iterator in the YAML stream. yaml::document_iterator YAMLIt; /// If we parse remark metadata in separate mode, we need to open a new file /// and parse that. std::unique_ptr<MemoryBuffer> SeparateBuf; YAMLRemarkParser(StringRef Buf); Expected<std::unique_ptr<Remark>> next() override; static bool classof(const RemarkParser *P) { return P->ParserFormat == Format::YAML; } protected: YAMLRemarkParser(StringRef Buf, Optional<ParsedStringTable> StrTab); /// Create a YAMLParseError error from an existing error generated by the YAML /// parser. /// If there is no error, this returns Success. Error error(); /// Create a YAMLParseError error referencing a specific node. Error error(StringRef Message, yaml::Node &Node); /// Parse a YAML remark to a remarks::Remark object. Expected<std::unique_ptr<Remark>> parseRemark(yaml::Document &Remark); /// Parse the type of a remark to an enum type. Expected<Type> parseType(yaml::MappingNode &Node); /// Parse one key to a string. Expected<StringRef> parseKey(yaml::KeyValueNode &Node); /// Parse one value to a string. virtual Expected<StringRef> parseStr(yaml::KeyValueNode &Node); /// Parse one value to an unsigned. Expected<unsigned> parseUnsigned(yaml::KeyValueNode &Node); /// Parse a debug location. Expected<RemarkLocation> parseDebugLoc(yaml::KeyValueNode &Node); /// Parse an argument. Expected<Argument> parseArg(yaml::Node &Node); }; /// YAML with a string table to Remark parser. 
struct YAMLStrTabRemarkParser : public YAMLRemarkParser { YAMLStrTabRemarkParser(StringRef Buf, ParsedStringTable StrTab) : YAMLRemarkParser(Buf, std::move(StrTab)) {} static bool classof(const RemarkParser *P) { return P->ParserFormat == Format::YAMLStrTab; } protected: /// Parse one value to a string. Expected<StringRef> parseStr(yaml::KeyValueNode &Node) override; }; Expected<std::unique_ptr<YAMLRemarkParser>> createYAMLParserFromMeta(StringRef Buf, Optional<ParsedStringTable> StrTab = None, Optional<StringRef> ExternalFilePrependPath = None); } // end namespace remarks } // end namespace llvm #endif /* LLVM_REMARKS_YAML_REMARK_PARSER_H */
1,416
2,921
<filename>blockchains/smartchain/assets/0x4354a4F710182966E55EA30CFa807FA1b821a67b/info.json<gh_stars>1000+ { "name": "<NAME>", "type": "BEP20", "symbol": "Foxy", "decimals": 9, "website": "https://foxynft.org/", "description": "Foxy Equilibrium is an experimental, play to earn NFT game on the BSC network which integrates (BEP721) tokens into the dAPP. Players can claim daily rewards, feed pets, battle with others and more!.", "explorer": "https://bscscan.com/token/0x4354a4f710182966e55ea30cfa807fa1b821a67b", "status": "active", "id": "0x4354a4F710182966E55EA30CFa807FA1b821a67b" }
264
409
namespace xlang::param { struct hstring { hstring() noexcept : m_handle(nullptr) {} hstring(hstring const& values) = delete; hstring& operator=(hstring const& values) = delete; hstring(std::nullptr_t) = delete; ~hstring() noexcept { xlang_delete_string(m_handle); } hstring(xlang::hstring const& value) noexcept : m_handle(get_abi(value)) { } // char8_t overloads hstring(std::basic_string_view<xlang_char8> const& value) noexcept { init<xlang_char8, false>(value); } hstring(std::basic_string<xlang_char8> const& value) noexcept { init<xlang_char8, true>(value); } hstring(xlang_char8 const* const value) noexcept { init<xlang_char8, true>(value); } // char16_t overloads hstring(std::basic_string_view<char16_t> const& value) noexcept { init<char16_t, false>(value); } hstring(std::basic_string<char16_t> const& value) noexcept { init<char16_t, true>(value); } hstring(char16_t const* const value) noexcept { init<char16_t, true>(value); } #ifdef _WIN32 // wchar_t overloads hstring(std::basic_string_view<wchar_t> const& value) noexcept { init<wchar_t, false>(value); } hstring(std::basic_string<wchar_t> const& value) noexcept { init<wchar_t, true>(value); } hstring(wchar_t const* const value) noexcept { init<wchar_t, true>(value); } #endif operator xlang::hstring const&() const noexcept { return *reinterpret_cast<xlang::hstring const*>(this); } private: template <typename char_type, bool is_safe> void init(std::basic_string_view<char_type> str) noexcept { static_assert(impl::is_char_type_supported<char_type>::value); auto const value = impl::normalize_char_type(str); auto const length = static_cast<uint32_t>(value.size()); if constexpr (is_safe) { if constexpr (sizeof(char_type) == sizeof(xlang_char8)) { XLANG_VERIFY_(nullptr, xlang_create_string_reference_utf8(value.data(), length, &m_header, &m_handle)); } else { XLANG_VERIFY_(nullptr, xlang_create_string_reference_utf16(value.data(), length, &m_header, &m_handle)); } } else { if constexpr (sizeof(char_type) == sizeof(xlang_char8)) { if (nullptr != xlang_create_string_reference_utf8(value.data(), length, &m_header, &m_handle)) { std::terminate(); } } else { if (nullptr != xlang_create_string_reference_utf16(value.data(), length, &m_header, &m_handle)) { std::terminate(); } } } } xlang_string m_handle; xlang_string_header m_header; }; inline xlang_string get_abi(hstring const& object) noexcept { return *(xlang_string*)(&object); } } namespace xlang::impl { template <typename T> using param_type = std::conditional_t<std::is_same_v<T, hstring>, param::hstring, T>; }
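A small usage sketch of the parameter-binding type above (illustrative only; it assumes the xlang PAL functions such as xlang_create_string_reference_utf16 are available at link time, and `consume_abi`/`example` are made-up stand-ins, not part of the original header):

// Hypothetical caller: xlang::param::hstring wraps the literal as a string
// reference for the duration of the call, and get_abi() exposes the raw handle.
void consume_abi(xlang_string value);      // stand-in for any ABI-level function

void example()
{
    xlang::param::hstring tmp{u"hello"};   // uses the char16_t const* overload shown above
    consume_abi(get_abi(tmp));             // no copy; the handle stays valid while tmp is in scope
}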
1,956
480
<gh_stars>100-1000 /* * Copyright [2013-2021], Alibaba Group Holding Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.polardbx.optimizer.core.rel; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.dialect.DbType; import com.alibaba.polardbx.optimizer.utils.PlannerUtils; import com.alibaba.polardbx.optimizer.utils.RelUtils; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelWriter; import org.apache.calcite.rel.externalize.RelDrdsWriter; import org.apache.calcite.sql.SqlNode; import java.util.HashMap; import java.util.List; import java.util.Map; /** * @author chenmo.cm */ public class PhyQueryOperation extends BaseQueryOperation { private Map<Integer, ParameterContext> param; public PhyQueryOperation(RelOptCluster cluster, RelTraitSet traitSet, SqlNode nativeSqlNode, String dbIndex, Map<Integer, ParameterContext> param) { this(cluster, traitSet, nativeSqlNode, dbIndex, param, PlannerUtils.getDynamicParamIndex(nativeSqlNode)); } public PhyQueryOperation(RelOptCluster cluster, RelTraitSet traitSet, SqlNode nativeSqlNode, String dbIndex, Map<Integer, ParameterContext> param, List<Integer> dynamicParamIndex) { super(cluster, traitSet, RelUtils.toNativeSqlLine(nativeSqlNode), nativeSqlNode, DbType.MYSQL); this.dbIndex = dbIndex; this.param = new HashMap<>(); // fix unmatched param index (e.g. limit ?, ?) int index = 1; for (Integer i : dynamicParamIndex) { final ParameterContext parameterContext = param.get(i + 1); this.param.put(index, PlannerUtils.changeParameterContextIndex(parameterContext, index)); index++; } } @Override public Pair<String, Map<Integer, ParameterContext>> getDbIndexAndParam(Map<Integer, ParameterContext> param, ExecutionContext executionContext) { return Pair.of(this.dbIndex, this.param); } @Override public RelWriter explainTermsForDisplay(RelWriter pw) { pw.item(RelDrdsWriter.REL_NAME, getExplainName()); pw.item("node", dbIndex); pw.item("sql", this.sqlTemplate); return pw; } public Map<Integer, ParameterContext> getParam() { return param; } public void setParam(Map<Integer, ParameterContext> param) { this.param = param; } }
1,223
4,767
package com.roncoo.pay.trade.bo; import javax.validation.constraints.Digits; import javax.validation.constraints.NotNull; import javax.validation.constraints.Size; import java.io.Serializable; import java.math.BigDecimal; /** * 用户主扫请求Bo */ public class ScanPayRequestBo implements Serializable { @Size(min = 16 , max = 32 , message = "商户Key[payKey]长度最小16位最大32位") @NotNull(message = "商户Key[payKey]不能为空") private String payKey; @Size(max = 200 , message = "商品名称[productName]长度最大200位") @NotNull(message = "商品名称[productName]不能为空") private String productName; @Size(min = 5 , max = 20 , message = "商品订单号[orderNo]长度最小5位,最大20位") @NotNull(message = "商品订单号[orderNo]不能为空") private String orderNo; @Digits(integer = 12, fraction = 2, message = "订单金额格式有误") @NotNull(message = "订单金额[orderPrice]不能为空") private BigDecimal orderPrice; @Size(min = 1 , max = 20 , message = "订单IP[orderIp]长度最小1位,最大20位") @NotNull(message = "订单IP[orderIp]不能为空") private String orderIp; @Size(min = 1 , max = 8 , message = "订单日期[orderDate]长度最小1位,最大8位") @NotNull(message = "订单日期[orderDate]不能为空") private String orderDate; @Size(min = 1 , max = 14 , message = "订单时间[orderTime]长度最小1位,最大14位") @NotNull(message = "订单时间[orderTime]不能为空") private String orderTime; @NotNull(message = "订单有效期[orderPeriod]不能为空") private Integer orderPeriod; @Size(min = 1 , max = 200 , message = "页面跳转地址[returnUrl]长度最小1位,最大200位") @NotNull(message = "页面跳转地址[returnUrl]不能为空") private String returnUrl; @Size(min = 1 , max = 200 , message = "异步通知地址[notifyUrl]长度最小1位,最大200位") @NotNull(message = "异步通知地址[notifyUrl]不能为空") private String notifyUrl; @NotNull(message = "签名[sign]不能为空") private String sign; private String remark;//支付备注 /** * 支付类型 */ private String payType;//支付类型 /** * 分期付款笔数 */ private Integer numberOfStages;//分期笔数 public String getPayKey() { return payKey; } public void setPayKey(String payKey) { this.payKey = payKey; } public String getProductName() { return productName; } public void setProductName(String productName) { this.productName = productName; } public String getOrderNo() { return orderNo; } public void setOrderNo(String orderNo) { this.orderNo = orderNo; } public BigDecimal getOrderPrice() { return orderPrice; } public void setOrderPrice(BigDecimal orderPrice) { this.orderPrice = orderPrice; } public String getOrderIp() { return orderIp; } public void setOrderIp(String orderIp) { this.orderIp = orderIp; } public String getOrderDate() { return orderDate; } public void setOrderDate(String orderDate) { this.orderDate = orderDate; } public String getOrderTime() { return orderTime; } public void setOrderTime(String orderTime) { this.orderTime = orderTime; } public Integer getOrderPeriod() { return orderPeriod; } public void setOrderPeriod(Integer orderPeriod) { this.orderPeriod = orderPeriod; } public String getReturnUrl() { return returnUrl; } public void setReturnUrl(String returnUrl) { this.returnUrl = returnUrl; } public String getNotifyUrl() { return notifyUrl; } public void setNotifyUrl(String notifyUrl) { this.notifyUrl = notifyUrl; } public String getSign() { return sign; } public void setSign(String sign) { this.sign = sign; } public String getRemark() { return remark; } public void setRemark(String remark) { this.remark = remark; } public String getPayType() { return payType; } public void setPayType(String payType) { this.payType = payType; } public Integer getNumberOfStages() { return numberOfStages; } public void setNumberOfStages(Integer numberOfStages) { this.numberOfStages = numberOfStages; } @Override public String toString() { return 
"ScanPayRequestBo{" + "payKey='" + payKey + '\'' + ", productName='" + productName + '\'' + ", orderNo='" + orderNo + '\'' + ", orderPrice=" + orderPrice + ", orderIp='" + orderIp + '\'' + ", orderDate='" + orderDate + '\'' + ", orderTime='" + orderTime + '\'' + ", orderPeriod=" + orderPeriod + ", returnUrl='" + returnUrl + '\'' + ", notifyUrl='" + notifyUrl + '\'' + ", sign='" + sign + '\'' + ", remark='" + remark + '\'' + ", payType='" + payType + '\'' + ", numberOfStages=" + numberOfStages + '}'; } }
2,550
1,382
<filename>examples/firdespm_callback_example.c // // firdespm_callback_example.c // // This example demonstrates finite impulse response filter design // using the Parks-McClellan algorithm with callback function for // arbitrary response and weighting function. // // SEE ALSO: firdes_kaiser_example.c #include <stdio.h> #include <stdlib.h> #include <math.h> #include "liquid.h" #define OUTPUT_FILENAME "firdespm_callback_example.m" // user-defined callback function defining response and weights int callback(double _frequency, void * _userdata, double * _desired, double * _weight) { // de-reference pointer as floating-point value unsigned int n = *((unsigned int*)_userdata); double v = sincf(n*_frequency); double fc = 1.0f / (float)n; // inverse sinc if (_frequency < fc) { *_desired = 1.0f / v; // inverse of sinc *_weight = 4.0f; } else { *_desired = 0.0f; // stop-band *_weight = 10*fabs(v) * exp(4.0*_frequency); } return 0; } int main(int argc, char*argv[]) { // filter design parameters unsigned int n = 8; // sinc filter length unsigned int h_len = 81; // inverse sinc filter length liquid_firdespm_btype btype = LIQUID_FIRDESPM_BANDPASS; unsigned int num_bands = 2; float bands[4] = {0.00f, 0.75f/(float)n, // pass-band 1.05f/(float)n, 0.5f}; // stop-band // design filter float h[h_len]; firdespm q = firdespm_create_callback(h_len,num_bands,bands,btype,callback,&n); firdespm_execute(q,h); firdespm_destroy(q); // print coefficients unsigned int i; for (i=0; i<h_len; i++) printf("h(%4u) = %16.12f;\n", i+1, h[i]); // open output file FILE*fid = fopen(OUTPUT_FILENAME,"w"); fprintf(fid,"%% %s : auto-generated file\n", OUTPUT_FILENAME); fprintf(fid,"clear all;\n"); fprintf(fid,"close all;\n\n"); fprintf(fid,"n=%u;\n", n); fprintf(fid,"h_len=%u;\n", h_len); for (i=0; i<h_len; i++) fprintf(fid,"h(%4u) = %20.8e;\n", i+1, h[i]); fprintf(fid,"nfft=1024;\n"); fprintf(fid,"H0=20*log10(abs(fftshift(fft(ones(1,n)/n,nfft))));\n"); fprintf(fid,"H1=20*log10(abs(fftshift(fft(h, nfft))));\n"); fprintf(fid,"Hc=H0+H1;\n"); fprintf(fid,"f=[0:(nfft-1)]/nfft-0.5;\n"); fprintf(fid,"figure;\n"); fprintf(fid,"hold on;\n"); fprintf(fid,"plot(f,H0,'Color',[0.5 0.5 0.5],'LineWidth',1);\n"); fprintf(fid,"plot(f,H1,'Color',[0.0 0.2 0.5],'LineWidth',1);\n"); fprintf(fid,"plot(f,Hc,'Color',[0.0 0.5 0.2],'LineWidth',2);\n"); fprintf(fid,"hold off;\n"); fprintf(fid,"grid on;\n"); fprintf(fid,"xlabel('normalized frequency');\n"); fprintf(fid,"ylabel('PSD [dB]');\n"); fprintf(fid,"legend('sinc','inverse sinc','composite');\n"); fprintf(fid,"title('Filter design (firdespm), inverse sinc');\n"); fprintf(fid,"axis([-0.5 0.5 -80 20]);\n"); fclose(fid); printf("results written to %s.\n", OUTPUT_FILENAME); printf("done.\n"); return 0; }
1,499
523
package io.github.droidkaigi.confsched2017.viewmodel;

import com.android.databinding.library.baseAdapters.BR;

import android.databinding.BaseObservable;
import android.databinding.Bindable;

import javax.inject.Inject;

import io.github.droidkaigi.confsched2017.di.scope.ActivityScope;

@ActivityScope
public class ToolbarViewModel extends BaseObservable implements ViewModel {

    private String toolbarTitle;

    @Inject
    public ToolbarViewModel() {
    }

    @Bindable
    public String getToolbarTitle() {
        return toolbarTitle;
    }

    public void setToolbarTitle(String title) {
        toolbarTitle = title;
        notifyPropertyChanged(BR.toolbarTitle);
    }

    @Override
    public void destroy() {
        // Nothing to do
    }
}
274
1,233
<reponame>calvin681/mantis<gh_stars>1000+ /* * Copyright 2019 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.mantisrx.server.master.mesos; import com.netflix.fenzo.VirtualMachineLease; import io.mantisrx.server.master.config.MasterConfiguration; import io.mantisrx.server.master.scheduler.JobMessageRouter; import io.mantisrx.server.master.scheduler.WorkerRegistry; import io.mantisrx.shaded.com.google.common.base.Preconditions; import java.util.List; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import org.apache.mesos.MesosSchedulerDriver; import org.apache.mesos.Protos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import rx.Observer; import rx.functions.Action1; public class MesosDriverSupplier implements Supplier<MesosSchedulerDriver> { private static final Logger logger = LoggerFactory.getLogger(MesosDriverSupplier.class); private final MasterConfiguration masterConfig; private final Observer<String> vmLeaseRescindedObserver; private final JobMessageRouter jobMessageRouter; private final WorkerRegistry workerRegistry; private final AtomicReference<MesosSchedulerDriver> mesosDriverRef = new AtomicReference<>(null); private final AtomicBoolean isInitialized = new AtomicBoolean(false); private volatile Action1<List<VirtualMachineLease>> addVMLeaseAction = null; private final AtomicInteger numAttemptsToInit = new AtomicInteger(0); public MesosDriverSupplier(final MasterConfiguration masterConfig, final Observer<String> vmLeaseRescindedObserver, final JobMessageRouter jobMessageRouter, final WorkerRegistry workerRegistry) { this.masterConfig = masterConfig; this.vmLeaseRescindedObserver = vmLeaseRescindedObserver; this.jobMessageRouter = jobMessageRouter; this.workerRegistry = workerRegistry; } Optional<MesosSchedulerDriver> initMesosSchedulerDriverWithTimeout(MesosSchedulerCallbackHandler mesosSchedulerCallbackHandler, Protos.FrameworkInfo framework) { ExecutorService executorService = Executors.newSingleThreadExecutor(); int mesosSchedulerDriverInitTimeoutSec = masterConfig.getMesosSchedulerDriverInitTimeoutSec(); logger.info("initializing mesos scheduler driver with timeout of {} sec", mesosSchedulerDriverInitTimeoutSec); Optional<MesosSchedulerDriver> mesosSchedulerDriverO = Optional.empty(); try { Future<MesosSchedulerDriver> driverF = executorService.submit(() -> new MesosSchedulerDriver(mesosSchedulerCallbackHandler, framework, masterConfig.getMasterLocation())); MesosSchedulerDriver mesosSchedulerDriver = driverF.get(mesosSchedulerDriverInitTimeoutSec, TimeUnit.SECONDS); mesosSchedulerDriverO = Optional.ofNullable(mesosSchedulerDriver); } catch (Exception e) { logger.info("failed to initialize MesosSchedulerDriver", e); } finally { 
executorService.shutdown(); } return mesosSchedulerDriverO; } @Override public MesosSchedulerDriver get() { if (addVMLeaseAction == null) { logger.warn("addVMLeaseAction is null, attempt to get Mesos Driver before MesosDriverSupplier init"); throw new IllegalStateException("addVMLeaseAction must be set before creating MesosSchedulerDriver"); } if (isInitialized.compareAndSet(false, true)) { if (numAttemptsToInit.incrementAndGet() > masterConfig.getMesosSchedulerDriverInitMaxAttempts()) { logger.error("Failed to initialize Mesos scheduler driver after {} attempts, will terminate master", numAttemptsToInit.get() - 1); System.exit(2); } logger.info("initializing mesos scheduler callback handler"); final MesosSchedulerCallbackHandler mesosSchedulerCallbackHandler = new MesosSchedulerCallbackHandler(addVMLeaseAction, vmLeaseRescindedObserver, jobMessageRouter, workerRegistry); final Protos.FrameworkInfo framework = Protos.FrameworkInfo.newBuilder() .setUser("") .setName(masterConfig.getMantisFrameworkName()) .setFailoverTimeout(masterConfig.getMesosFailoverTimeOutSecs()) .setId(Protos.FrameworkID.newBuilder().setValue(masterConfig.getMantisFrameworkName())) .setCheckpoint(true) .build(); logger.info("initializing mesos scheduler driver"); MesosSchedulerDriver mesosDriver = initMesosSchedulerDriverWithTimeout(mesosSchedulerCallbackHandler, framework).orElseGet(() -> { logger.info("initialize MesosSchedulerDriver failed, will retry"); isInitialized.compareAndSet(true, false); return this.get(); }); boolean result = mesosDriverRef.compareAndSet(null, mesosDriver); logger.info("initialized mesos scheduler driver {}", result); } else { int sleepIntervalMillis = 1000; int maxTimeToWaitMillis = masterConfig.getMesosSchedulerDriverInitMaxAttempts() * masterConfig.getMesosSchedulerDriverInitTimeoutSec() * 1000; // block maxTimeToWaitMillis till mesosDriver is not null while (mesosDriverRef.get() == null) { if (maxTimeToWaitMillis <= 0) { logger.error("mesos driver init taking too long, exiting"); System.exit(2); } try { logger.info("mesos scheduler driver null, sleep for 1 sec awaiting init"); Thread.sleep(sleepIntervalMillis); maxTimeToWaitMillis -= sleepIntervalMillis; } catch (InterruptedException e) { logger.warn("thread interrupted during sleep", e); Thread.currentThread().interrupt(); } } } return mesosDriverRef.get(); } public void setAddVMLeaseAction(final Action1<List<VirtualMachineLease>> addVMLeaseAction) { Preconditions.checkNotNull(addVMLeaseAction); this.addVMLeaseAction = addVMLeaseAction; } public void shutdown() { MesosSchedulerDriver mesosSchedulerDriver = mesosDriverRef.get(); if (mesosSchedulerDriver != null) { mesosSchedulerDriver.stop(true); } else { logger.info("mesos driver null, continue shutdown"); } } }
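For orientation, a hedged wiring sketch (not taken from the Mantis sources; `masterConfig`, `leaseObserver`, `jobMessageRouter`, `workerRegistry` and `scheduler.addOffers` are placeholders for objects the surrounding service already owns):

// The lease action must be registered before the first get(); otherwise get()
// throws IllegalStateException, as enforced above.
MesosDriverSupplier driverSupplier =
    new MesosDriverSupplier(masterConfig, leaseObserver, jobMessageRouter, workerRegistry);
driverSupplier.setAddVMLeaseAction(leases -> scheduler.addOffers(leases)); // hypothetical consumer
MesosSchedulerDriver driver = driverSupplier.get(); // may block (or exit the process) during init
driver.start();                                     // begin receiving scheduler callbacks
// ... later, on service shutdown:
driverSupplier.shutdown();                          // calls stop(true), keeping framework failover enabled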
3,022
2,472
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
// Please refer to the license text that comes with this tendis open source
// project for additional information.

#ifdef _WIN32

#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <unistd.h>
#include <windows.h>
#include <WinBase.h>

#include "tendisplus/utils/invariant.h"

int gettimeofday(struct timeval* tp, void* tzp) {
  unsigned int ticks;
  ticks = GetTickCount();
  // GetTickCount() returns milliseconds since boot; split it into whole
  // seconds and the microsecond remainder (the original assigned the full
  // millisecond count * 1000 to tv_usec, which overflows that field).
  tp->tv_sec = ticks / 1000;
  tp->tv_usec = (ticks % 1000) * 1000;
  return 0;
}

void sleep(uint64_t seconds) {
  Sleep(seconds * 1000);
}

int rand_r(unsigned int* seedp) {
  // Shim only: seeds the shared PRNG from *seedp (or from the current time
  // when the seed is zero); unlike POSIX rand_r it does not advance *seedp.
  srand(*seedp ? *seedp : (unsigned)time(NULL));
  return rand();
}

typedef struct tagTHREADNAME_INFO {
  DWORD dwType;      // must be 0x1000
  LPCSTR szName;     // pointer to name (in user addr space)
  DWORD dwThreadID;  // thread ID (-1=caller thread)
  DWORD dwFlags;     // reserved for future use, must be zero
} THREADNAME_INFO;

void SetThreadName(DWORD dwThreadID, LPCSTR szThreadName) {
  THREADNAME_INFO info;
  info.dwType = 0x1000;
  info.szName = szThreadName;
  info.dwThreadID = dwThreadID;
  info.dwFlags = 0;
  if (strlen(szThreadName) > 15) {
    abort();
  }
  __try {
    RaiseException(0x406D1388, 0,
                   sizeof(info) / sizeof(DWORD),
                   (ULONG_PTR*)&info);  // NOLINT
  } __except(EXCEPTION_CONTINUE_EXECUTION) {
  }
}

int pthread_setname_np(uint32_t id, const char* name) {
  SetThreadName((DWORD)id, name);
  return 0;
}

struct tm* mylocaltime_r(const time_t* timep, struct tm* result) {
  localtime_s(result, timep);
  return result;
}

#endif  // _WIN32
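For orientation, here is a minimal, illustrative caller of the shims above. It is not part of tendis; it assumes the same Windows headers this file already pulls in (struct timeval and Sleep come from the Windows/winsock headers), and the timing values are made up.

// Sketch only: time a short Sleep() with the gettimeofday shim above.
#include <stdio.h>

int main() {
  struct timeval start, end;
  gettimeofday(&start, NULL);
  Sleep(1500);  // 1.5 seconds
  gettimeofday(&end, NULL);
  long long elapsed_ms = (long long)(end.tv_sec - start.tv_sec) * 1000
                       + (end.tv_usec - start.tv_usec) / 1000;
  printf("elapsed ~%lld ms\n", elapsed_ms);
  return 0;
}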
686
348
<filename>docs/data/leg-t2/054/05404147.json {"nom":"Crion","circ":"4ème circonscription","dpt":"Meurthe-et-Moselle","inscrits":84,"abs":30,"votants":54,"blancs":7,"nuls":1,"exp":46,"res":[{"nuance":"LR","nom":"<NAME>","voix":28},{"nuance":"REM","nom":"<NAME>","voix":18}]}
116
722
//MIT License // //Copyright (c) 2019 <NAME> // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. #include <bitsery/ext/inheritance.h> #include <bitsery/ext/pointer.h> #include <bitsery/ext/std_smart_ptr.h> #include <gmock/gmock.h> #include "serialization_test_utils.h" using bitsery::ext::BaseClass; using bitsery::ext::VirtualBaseClass; using bitsery::ext::InheritanceContext; using bitsery::ext::PointerLinkingContext; using bitsery::ext::PolymorphicContext; using bitsery::ext::StandardRTTI; using bitsery::ext::PointerOwner; using bitsery::ext::PointerObserver; using bitsery::ext::ReferencedByPointer; using bitsery::ext::StdSmartPtr; using testing::Eq; using TContext = std::tuple<PointerLinkingContext, InheritanceContext, PolymorphicContext<StandardRTTI>>; using SerContext = BasicSerializationContext<TContext>; //this is useful for PolymorphicContext to bind classes to serializer/deserializer using TSerializer = typename SerContext::TSerializer; using TDeserializer = typename SerContext::TDeserializer; /* * base class */ struct Base { Base() = default; explicit Base(uint64_t v) : x{v} {} uint64_t x{}; virtual ~Base() = default; }; template<typename S> void serialize(S& s, Base& o) { s.value8b(o.x); } struct Derived1 : Base { Derived1() = default; Derived1(uint64_t x_, uint64_t y_) : Base{x_}, y1{y_} {} friend bool operator==(const Derived1& lhs, const Derived1& rhs) { return lhs.x == rhs.x && lhs.y1 == rhs.y1; } uint64_t y1{}; }; template<typename S> void serialize(S& s, Derived1& o) { s.ext(o, BaseClass<Base>{}); s.value8b(o.y1); } struct Derived2 : Base { uint64_t y1{}; uint64_t y2{}; }; template<typename S> void serialize(S& s, Derived2& o) { s.ext(o, BaseClass<Base>{}); s.value8b(o.y1); s.value8b(o.y2); } // polymorphic structure that contains polymorphic pointer, to test memory resource propagation struct PolyPtrWithPolyPtrBase { std::unique_ptr<Base> ptr{}; virtual ~PolyPtrWithPolyPtrBase() = default; }; template<typename S> void serialize(S& s, PolyPtrWithPolyPtrBase& o) { s.ext(o.ptr, StdSmartPtr{}); } struct DerivedPolyPtrWithPolyPtr : PolyPtrWithPolyPtrBase { }; template<typename S> void serialize(S& s, DerivedPolyPtrWithPolyPtr& o) { s.ext(o.ptr, StdSmartPtr{}); } //define relationships between base class and derived classes for runtime polymorphism namespace bitsery { namespace ext { template<> struct PolymorphicBaseClass<Base> : PolymorphicDerivedClasses<Derived1, Derived2> { }; template<> struct PolymorphicBaseClass<PolyPtrWithPolyPtrBase> : 
PolymorphicDerivedClasses<DerivedPolyPtrWithPolyPtr> { }; } } // this class is for testing struct TestAllocInfo { void* ptr; size_t bytes; size_t alignment; size_t typeId; friend bool operator==(const TestAllocInfo& lhs, const TestAllocInfo& rhs) { return std::tie(lhs.ptr, lhs.bytes, lhs.alignment, lhs.typeId) == std::tie(rhs.ptr, rhs.bytes, rhs.alignment, rhs.typeId); } }; struct MemResourceForTest : public bitsery::ext::MemResourceBase { void* allocate(size_t bytes, size_t alignment, size_t typeId) override { const auto res = bitsery::ext::MemResourceNewDelete{}.allocate(bytes, alignment, typeId); allocs.push_back({res, bytes, alignment, typeId}); return res; } void deallocate(void* ptr, size_t bytes, size_t alignment, size_t typeId) noexcept override { deallocs.push_back({ptr, bytes, alignment, typeId}); bitsery::ext::MemResourceNewDelete{}.deallocate(ptr, bytes, alignment, typeId); } std::vector<TestAllocInfo> allocs{}; std::vector<TestAllocInfo> deallocs{}; }; class SerializeExtensionPointerWithAllocator : public testing::Test { public: TContext plctx{}; SerContext sctx{}; typename SerContext::TSerializer& createSerializer() { auto& res = sctx.createSerializer(plctx); std::get<2>(plctx).clear(); //bind serializer with classes std::get<2>(plctx).registerBasesList<SerContext::TSerializer>( bitsery::ext::PolymorphicClassesList<Base, PolyPtrWithPolyPtrBase>{}); return res; } typename SerContext::TDeserializer& createDeserializer() { auto& res = sctx.createDeserializer(plctx); std::get<2>(plctx).clear(); //bind deserializer with classes std::get<2>(plctx).registerBasesList<SerContext::TDeserializer>( bitsery::ext::PolymorphicClassesList<Base, PolyPtrWithPolyPtrBase>{}); return res; } bool isPointerContextValid() { return std::get<0>(plctx).isValid(); } virtual void TearDown() override { EXPECT_TRUE(isPointerContextValid()); } }; TEST_F(SerializeExtensionPointerWithAllocator, CanSetDefaultMemoryResourceInPointerLinkingContext) { MemResourceForTest memRes{}; std::get<0>(plctx).setMemResource(&memRes); Base* baseData = new Derived1{2, 1}; createSerializer().ext(baseData, PointerOwner{}); Base* baseRes = nullptr; createDeserializer().ext(baseRes, PointerOwner{}); auto dData = dynamic_cast<Derived1*>(baseData); auto dRes = dynamic_cast<Derived1*>(baseRes); EXPECT_THAT(dRes, ::testing::NotNull()); EXPECT_THAT(*dData, *dRes); EXPECT_THAT(memRes.allocs.size(), Eq(1u)); EXPECT_THAT(memRes.allocs[0].bytes, Eq(sizeof(Derived1))); EXPECT_THAT(memRes.allocs[0].alignment, Eq(alignof(Derived1))); EXPECT_THAT(memRes.allocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived1>())); EXPECT_THAT(memRes.deallocs.size(), Eq(0u)); delete dData; delete dRes; } TEST_F(SerializeExtensionPointerWithAllocator, CorrectlyDeallocatesPreviousInstance) { MemResourceForTest memRes{}; std::get<0>(plctx).setMemResource(&memRes); Base* baseData = new Derived1{2, 1}; createSerializer().ext(baseData, PointerOwner{}); Base* baseRes = new Derived2; createDeserializer().ext(baseRes, PointerOwner{}); auto dData = dynamic_cast<Derived1*>(baseData); auto dRes = dynamic_cast<Derived1*>(baseRes); EXPECT_THAT(dRes, ::testing::NotNull()); EXPECT_THAT(*dData, *dRes); EXPECT_THAT(memRes.allocs.size(), Eq(1u)); EXPECT_THAT(memRes.allocs[0].bytes, Eq(sizeof(Derived1))); EXPECT_THAT(memRes.allocs[0].alignment, Eq(alignof(Derived1))); EXPECT_THAT(memRes.allocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived1>())); EXPECT_THAT(memRes.deallocs.size(), Eq(1u)); EXPECT_THAT(memRes.deallocs[0].bytes, Eq(sizeof(Derived2))); 
EXPECT_THAT(memRes.deallocs[0].alignment, Eq(alignof(Derived2))); EXPECT_THAT(memRes.deallocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived2>())); delete dData; delete dRes; } TEST_F(SerializeExtensionPointerWithAllocator, DefaultDeleterIsNotUsedForStdUniquePtr) { MemResourceForTest memRes{}; std::get<0>(plctx).setMemResource(&memRes); std::unique_ptr<Base> baseData{}; createSerializer().ext(baseData, StdSmartPtr{}); auto baseRes = std::unique_ptr<Base>(new Derived1{45, 64}); createDeserializer().ext(baseRes, StdSmartPtr{}); EXPECT_THAT(memRes.allocs.size(), Eq(0u)); EXPECT_THAT(memRes.deallocs.size(), Eq(1u)); EXPECT_THAT(memRes.deallocs[0].bytes, Eq(sizeof(Derived1))); EXPECT_THAT(memRes.deallocs[0].alignment, Eq(alignof(Derived1))); EXPECT_THAT(memRes.deallocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived1>())); } struct CustomBaseDeleter { void operator()(Base* obj) { delete obj; } }; TEST_F(SerializeExtensionPointerWithAllocator, CustomDeleterIsNotUsedForStdUniquePtr) { MemResourceForTest memRes{}; std::get<0>(plctx).setMemResource(&memRes); std::unique_ptr<Base, CustomBaseDeleter> baseData{}; createSerializer().ext(baseData, StdSmartPtr{}); auto baseRes = std::unique_ptr<Base, CustomBaseDeleter>(new Derived1{45, 64}); createDeserializer().ext(baseRes, StdSmartPtr{}); EXPECT_THAT(memRes.allocs.size(), Eq(0u)); EXPECT_THAT(memRes.deallocs.size(), Eq(1u)); EXPECT_THAT(memRes.deallocs[0].bytes, Eq(sizeof(Derived1))); EXPECT_THAT(memRes.deallocs[0].alignment, Eq(alignof(Derived1))); EXPECT_THAT(memRes.deallocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived1>())); } TEST_F(SerializeExtensionPointerWithAllocator, CanSetMemResourcePerPointer) { MemResourceForTest memRes1{}; MemResourceForTest memRes2{}; std::get<0>(plctx).setMemResource(&memRes1); Base* baseData = new Derived1{2, 1}; createSerializer().ext(baseData, PointerOwner{bitsery::ext::PointerType::Nullable, &memRes2}); Base* baseRes = new Derived2; createDeserializer().ext(baseRes, PointerOwner{bitsery::ext::PointerType::Nullable, &memRes2}); auto dData = dynamic_cast<Derived1*>(baseData); auto dRes = dynamic_cast<Derived1*>(baseRes); EXPECT_THAT(dRes, ::testing::NotNull()); EXPECT_THAT(*dData, *dRes); EXPECT_THAT(memRes1.allocs.size(), Eq(0u)); EXPECT_THAT(memRes1.deallocs.size(), Eq(0u)); EXPECT_THAT(memRes2.allocs.size(), Eq(1u)); EXPECT_THAT(memRes2.allocs[0].bytes, Eq(sizeof(Derived1))); EXPECT_THAT(memRes2.allocs[0].alignment, Eq(alignof(Derived1))); EXPECT_THAT(memRes2.allocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived1>())); EXPECT_THAT(memRes2.deallocs.size(), Eq(1u)); EXPECT_THAT(memRes2.deallocs[0].bytes, Eq(sizeof(Derived2))); EXPECT_THAT(memRes2.deallocs[0].alignment, Eq(alignof(Derived2))); EXPECT_THAT(memRes2.deallocs[0].typeId, Eq(bitsery::ext::StandardRTTI::get<Derived2>())); delete dData; delete dRes; } TEST_F(SerializeExtensionPointerWithAllocator, MemResourceSetPerPointerByDefaultDoNotPropagate) { MemResourceForTest memRes1{}; MemResourceForTest memRes2{}; std::get<0>(plctx).setMemResource(&memRes1); auto data = std::unique_ptr<PolyPtrWithPolyPtrBase>(new PolyPtrWithPolyPtrBase{}); data->ptr = std::unique_ptr<Base>(new Derived1{5, 6}); createSerializer().ext(data, StdSmartPtr{bitsery::ext::PointerType::Nullable, &memRes2}); auto res = std::unique_ptr<PolyPtrWithPolyPtrBase>(new DerivedPolyPtrWithPolyPtr{}); res->ptr = std::unique_ptr<Base>(new Derived2{}); createDeserializer().ext(res, StdSmartPtr{bitsery::ext::PointerType::Nullable, &memRes2}); EXPECT_THAT(memRes1.allocs.size(), 
Eq(1u)); // Base* was destroyed by unique_ptr on PolyPtrWithPolyPtrBase destructor, hence == 0 EXPECT_THAT(memRes1.deallocs.size(), Eq(0u)); EXPECT_THAT(memRes2.allocs.size(), Eq(1u)); EXPECT_THAT(memRes2.deallocs.size(), Eq(1u)); } TEST_F(SerializeExtensionPointerWithAllocator, MemResourceSetPerPointerCanPropagate) { MemResourceForTest memRes1{}; MemResourceForTest memRes2{}; std::get<0>(plctx).setMemResource(&memRes1); auto data = std::unique_ptr<PolyPtrWithPolyPtrBase>(new PolyPtrWithPolyPtrBase{}); data->ptr = std::unique_ptr<Base>(new Derived1{5, 6}); createSerializer().ext(data, StdSmartPtr{bitsery::ext::PointerType::Nullable, &memRes2, true}); auto res = std::unique_ptr<PolyPtrWithPolyPtrBase>(new DerivedPolyPtrWithPolyPtr{}); res->ptr = std::unique_ptr<Base>(new Derived2{}); createDeserializer().ext(res, StdSmartPtr{bitsery::ext::PointerType::Nullable, &memRes2, true}); EXPECT_THAT(memRes1.allocs.size(), Eq(0u)); EXPECT_THAT(memRes1.deallocs.size(), Eq(0u)); EXPECT_THAT(memRes2.allocs.size(), Eq(2u)); // deallocates are actually == 1, because when we destroy PolyPtrWithPolyPtrBase // it also destroys Base because it is managed by unique_ptr. // in order to do it correctly we should always use custom deleter for structures with nested pointers EXPECT_THAT(memRes2.deallocs.size(), Eq(1u)); }
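As a small aside, the MemResourceBase interface exercised by MemResourceForTest above is easy to adapt. The sketch below is an assumption-labelled variation, not part of the bitsery test suite: it keeps only a running count of live bytes, which can be handy when asserting that deserialization does not leak. It uses exactly the allocate/deallocate signatures and the MemResourceNewDelete fallback that already appear in this file, and assumes the same bitsery headers are included.

// Sketch only: a counting memory resource built on the interface used above.
struct CountingMemResource : public bitsery::ext::MemResourceBase {
    size_t liveBytes = 0;

    void* allocate(size_t bytes, size_t alignment, size_t typeId) override {
        liveBytes += bytes;
        return bitsery::ext::MemResourceNewDelete{}.allocate(bytes, alignment, typeId);
    }

    void deallocate(void* ptr, size_t bytes, size_t alignment, size_t typeId) noexcept override {
        liveBytes -= bytes;
        bitsery::ext::MemResourceNewDelete{}.deallocate(ptr, bytes, alignment, typeId);
    }
};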
5,080
619
/*
 * Author: <NAME> <<EMAIL>>
 * Copyright (c) 2014 Intel Corporation.
 *
 * This program and the accompanying materials are made available under the
 * terms of The MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 */

#include <iostream>
#include <signal.h>

#include "biss0001.hpp"
#include "upm_utilities.h"

using namespace std;

int shouldRun = true;

void
sig_handler(int signo)
{
    if (signo == SIGINT)
        shouldRun = false;
}

int
main()
{
    signal(SIGINT, sig_handler);

    //! [Interesting]
    // Instantiate a Grove Motion sensor on GPIO pin D2
    upm::BISS0001 motion(2);

    while (shouldRun) {
        bool val = motion.value();

        if (val)
            cout << "Detecting moving object";
        else
            cout << "No moving objects detected";

        cout << endl;

        upm_delay(1);
    }
    //! [Interesting]

    cout << "Exiting" << endl;

    return 0;
}
391
1,177
{ "blurb": "Given an age in seconds, calculate how old someone is in terms of a given planet's solar years.", "authors": [], "contributors": [ "abhijitparida", "behrtam", "cmccandless", "crsmi", "Dog", "gabriel376", "gabriellhrn", "ikhadykin", "kytrinyx", "lowks", "N-Parsons", "pheanex", "sjakobi", "tqa236" ], "files": { "solution": [ "space_age.py" ], "test": [ "space_age_test.py" ], "example": [ ".meta/example.py" ] }, "source": "Partially inspired by Chapter 1 in <NAME>'s online Learn to Program tutorial.", "source_url": "http://pine.fm/LearnToProgram/?Chapter=01" }
322
414
//
//  RCDChatViewController.h
//  RCloudMessage
//
//  Created by Liv on 15/3/13.
//  Copyright (c) 2015 RongCloud. All rights reserved.
//

#import <RongIMKit/RongIMKit.h>

@interface RCDChatViewController : RCConversationViewController

@property (nonatomic, assign) BOOL needPopToRootView;

@end
106
1,755
/*========================================================================= Program: Visualization Toolkit Module: TestPBRAnisotropy.cxx Copyright (c) <NAME>, <NAME>, <NAME> All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ // This test covers the PBR Anisotropy feature // It renders spheres with different anisotropy values #include "vtkActor.h" #include "vtkActorCollection.h" #include "vtkCamera.h" #include "vtkGenericOpenGLRenderWindow.h" #include "vtkImageData.h" #include "vtkImageFlip.h" #include "vtkInteractorStyleTrackballCamera.h" #include "vtkJPEGReader.h" #include "vtkLight.h" #include "vtkNew.h" #include "vtkOpenGLPolyDataMapper.h" #include "vtkOpenGLRenderer.h" #include "vtkOpenGLSkybox.h" #include "vtkOpenGLTexture.h" #include "vtkPBRIrradianceTexture.h" #include "vtkPBRPrefilterTexture.h" #include "vtkPNGReader.h" #include "vtkPolyDataTangents.h" #include "vtkProperty.h" #include "vtkRegressionTestImage.h" #include "vtkRenderWindow.h" #include "vtkRenderWindowInteractor.h" #include "vtkRendererCollection.h" #include "vtkSphereSource.h" #include "vtkTestUtilities.h" #include "vtkTexture.h" #include "vtkTextureMapToSphere.h" //---------------------------------------------------------------------------- int TestPBRAnisotropy(int argc, char* argv[]) { vtkNew<vtkOpenGLRenderer> renderer; vtkNew<vtkRenderWindow> renWin; renWin->SetSize(600, 600); renWin->AddRenderer(renderer); vtkNew<vtkRenderWindowInteractor> iren; iren->SetRenderWindow(renWin); vtkSmartPointer<vtkPBRIrradianceTexture> irradiance = renderer->GetEnvMapIrradiance(); irradiance->SetIrradianceStep(0.3); renderer->UseSphericalHarmonicsOff(); vtkNew<vtkOpenGLTexture> textureCubemap; textureCubemap->CubeMapOn(); textureCubemap->UseSRGBColorSpaceOn(); std::string pathSkybox[6] = { "Data/skybox/posx.jpg", "Data/skybox/negx.jpg", "Data/skybox/posy.jpg", "Data/skybox/negy.jpg", "Data/skybox/posz.jpg", "Data/skybox/negz.jpg" }; for (int i = 0; i < 6; i++) { vtkNew<vtkJPEGReader> jpg; char* fname = vtkTestUtilities::ExpandDataFileName(argc, argv, pathSkybox[i].c_str()); jpg->SetFileName(fname); delete[] fname; vtkNew<vtkImageFlip> flip; flip->SetInputConnection(jpg->GetOutputPort()); flip->SetFilteredAxis(1); // flip y axis textureCubemap->SetInputConnection(i, flip->GetOutputPort()); } renderer->SetEnvironmentTexture(textureCubemap); renderer->UseImageBasedLightingOn(); vtkNew<vtkSphereSource> sphere; sphere->SetThetaResolution(75); sphere->SetPhiResolution(75); vtkNew<vtkTextureMapToSphere> textureMap; textureMap->SetInputConnection(sphere->GetOutputPort()); textureMap->PreventSeamOff(); vtkNew<vtkPolyDataTangents> tangents; tangents->SetInputConnection(textureMap->GetOutputPort()); vtkNew<vtkPolyDataMapper> mapper; mapper->SetInputConnection(tangents->GetOutputPort()); vtkNew<vtkActor> actor; actor->SetMapper(mapper); actor->GetProperty()->SetInterpolationToPBR(); for (int i = 0; i < 6; i++) { vtkNew<vtkActor> actorSphere; actorSphere->SetPosition(i, 0.0, 0.0); actorSphere->RotateX(20); actorSphere->RotateY(20); actorSphere->SetMapper(mapper); actorSphere->GetProperty()->SetInterpolationToPBR(); actorSphere->GetProperty()->SetMetallic(1.0); actorSphere->GetProperty()->SetAnisotropy(1.0); 
actorSphere->GetProperty()->SetRoughness(i / 5.0); renderer->AddActor(actorSphere); } for (int i = 0; i < 6; i++) { vtkNew<vtkActor> actorSphere; actorSphere->SetPosition(i, 1.0, 0.0); actorSphere->RotateX(20); actorSphere->RotateY(20); actorSphere->SetMapper(mapper); actorSphere->GetProperty()->SetInterpolationToPBR(); actorSphere->GetProperty()->SetMetallic(1.0); actorSphere->GetProperty()->SetRoughness(0.1); actorSphere->GetProperty()->SetAnisotropy(i / 5.0); renderer->AddActor(actorSphere); } for (int i = 0; i < 6; i++) { vtkNew<vtkActor> actorSphere; actorSphere->SetPosition(i, 2.0, 0.0); actorSphere->RotateX(20); actorSphere->RotateY(20); actorSphere->SetMapper(mapper); actorSphere->GetProperty()->SetInterpolationToPBR(); actorSphere->GetProperty()->SetMetallic(1.0); actorSphere->GetProperty()->SetRoughness(0.1); actorSphere->GetProperty()->SetAnisotropy(1.0); actorSphere->GetProperty()->SetAnisotropyRotation(i / 5.0); renderer->AddActor(actorSphere); } renWin->Render(); int retVal = vtkRegressionTestImage(renWin); if (retVal == vtkRegressionTester::DO_INTERACTOR) { iren->Start(); } return !retVal; }
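For quick reference, the per-sphere setup that the three loops above sweep over can be condensed into one helper. This is only a sketch reusing calls that already appear in this test; the parameter values and the helper name are made up.

// Sketch only: add one more anisotropic PBR sphere, using the same calls as the loops above.
void AddAnisotropySample(vtkOpenGLRenderer* renderer, vtkPolyDataMapper* mapper)
{
  vtkNew<vtkActor> actorSphere;
  actorSphere->SetMapper(mapper);                        // mapper built from the tangents filter above
  actorSphere->GetProperty()->SetInterpolationToPBR();
  actorSphere->GetProperty()->SetMetallic(1.0);
  actorSphere->GetProperty()->SetRoughness(0.1);
  actorSphere->GetProperty()->SetAnisotropy(0.8);        // 0 = isotropic, 1 = fully anisotropic
  actorSphere->GetProperty()->SetAnisotropyRotation(0.25);
  renderer->AddActor(actorSphere);
}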
1,834
442
package domain;

/**
 * A slightly more complex book.
 * @author JingQ at 2019-06-05
 */
public class ComplexBook extends BaseBook {

    private String tag;

    public ComplexBook() {}

    public ComplexBook(String name, String tag) {
        setName(name);
        this.tag = tag;
    }

    @Override
    public String getName() {
        return super.getName() == null ? "Complex book name" : super.getName();
    }

    public String getTag() {
        return tag;
    }

    public void setTag(String tag) {
        this.tag = tag;
    }
}
171
381
<reponame>jsalinaspolo/JGiven package com.tngtech.jgiven.impl.inject; import static java.util.stream.Collectors.toList; import com.google.common.collect.Maps; import com.tngtech.jgiven.annotation.ExpectedScenarioState; import com.tngtech.jgiven.annotation.ProvidedScenarioState; import com.tngtech.jgiven.annotation.ScenarioState; import com.tngtech.jgiven.annotation.ScenarioState.Resolution; import com.tngtech.jgiven.exception.AmbiguousResolutionException; import com.tngtech.jgiven.exception.JGivenInjectionException; import com.tngtech.jgiven.exception.JGivenMissingGuaranteedScenarioStateException; import com.tngtech.jgiven.exception.JGivenMissingRequiredScenarioStateException; import com.tngtech.jgiven.impl.util.FieldCache; import java.lang.reflect.Field; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Used by Scenario to inject and read values from objects. */ public class ValueInjector { private static final Logger log = LoggerFactory.getLogger(ValueInjector.class); /** * Caches all classes that have been already validated for ambiguous resolution. * This avoids duplicate validations of the same class. */ private static final ConcurrentHashMap<Class<?>, Boolean> validatedClasses = new ConcurrentHashMap<>(); private final ValueInjectorState state = new ValueInjectorState(); /** * @throws AmbiguousResolutionException when multiple fields with the same resolution exist in the given object */ @SuppressWarnings("unchecked") public void validateFields(Object object) { if (validatedClasses.get(object.getClass()) == Boolean.TRUE) { return; } Map<Object, Field> resolvedFields = Maps.newHashMap(); for (ScenarioStateField field : getScenarioFields(object)) { field.getField().setAccessible(true); Resolution resolution = field.getResolution(); Object key = null; if (resolution == Resolution.NAME) { key = field.getField().getName(); } else { key = field.getField().getType(); } if (resolvedFields.containsKey(key)) { Field existingField = resolvedFields.get(key); throw new AmbiguousResolutionException("Ambiguous fields with same " + resolution + " detected. 
Field 1: " + existingField + ", field 2: " + field.getField()); } resolvedFields.put(key, field.getField()); } validatedClasses.put(object.getClass(), Boolean.TRUE); } private List<ScenarioStateField> getScenarioFields(Object object) { @SuppressWarnings("unchecked") List<Field> scenarioFields = FieldCache .get(object.getClass()) .getFieldsWithAnnotation(ScenarioState.class, ProvidedScenarioState.class, ExpectedScenarioState.class); return scenarioFields.stream() .map(ScenarioStateField.fromField) .collect(toList()); } /** * @throws JGivenMissingGuaranteedScenarioStateException in case a field is guaranteed * and is not initialized by the finishing stage */ @SuppressWarnings("unchecked") public void readValues(Object object) { validateFields(object); checkGuaranteedStatesAreInitialized(object); for (ScenarioStateField field : getScenarioFields(object)) { try { Object value = field.getField().get(object); updateValue(field, value); log.debug("Reading value {} from field {}", value, field.getField()); } catch (IllegalAccessException e) { throw new RuntimeException("Error while reading field " + field.getField(), e); } } } /** * @throws JGivenMissingRequiredScenarioStateException in case a field requires * a value and the value is not present */ @SuppressWarnings("unchecked") public void updateValues(Object object) { validateFields(object); for (ScenarioStateField field : getScenarioFields(object)) { Object value = getValue(field); if (value != null) { try { field.getField().set(object, value); } catch (IllegalAccessException e) { throw new RuntimeException("Error while updating field " + field.getField(), e); } log.debug("Setting field {} to value {}", field.getField(), value); } else if (field.isRequired()) { throw new JGivenMissingRequiredScenarioStateException(field.getField()); } } } public <T> void injectValueByType(Class<T> clazz, T value) { state.updateValueByType(clazz, value); } public <T> void injectValueByName(String name, T value) { state.updateValueByName(name, value); } private void updateValue(ScenarioStateField field, Object value) { if (field.getResolution() == Resolution.NAME) { state.updateValueByName(field.getField().getName(), value); } else { state.updateValueByType(field.getField().getType(), value); } } private Object getValue(ScenarioStateField field) { if (field.getResolution() == Resolution.NAME) { return state.getValueByName(field.getField().getName()); } return state.getValueByType(field.getField().getType()); } private void checkGuaranteedStatesAreInitialized(Object instance) { for (Field field: FieldCache.get(instance.getClass()) .getFieldsWithAnnotation(ProvidedScenarioState.class, ScenarioState.class)) { if (field.isAnnotationPresent(ProvidedScenarioState.class)) { if (field.getAnnotation(ProvidedScenarioState.class).guaranteed()) { checkInitialized(instance, field); } } if (field.isAnnotationPresent(ScenarioState.class)) { if (field.getAnnotation(ScenarioState.class).guaranteed()) { checkInitialized(instance, field); } } } } private void checkInitialized(Object instance, Field field) { Object value = null; try { value = field.get(instance); } catch (IllegalAccessException e) { throw new JGivenInjectionException("The guaranteed field inside the scenario state cannot be accessed", e); } if (value == null) { throw new JGivenMissingGuaranteedScenarioStateException(field); } } }
2,892
575
<filename>content/browser/appcache/appcache.cc // Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/appcache/appcache.h" #include <stddef.h> #include <algorithm> #include <vector> #include "base/check_op.h" #include "base/notreached.h" #include "base/stl_util.h" #include "content/browser/appcache/appcache_database.h" #include "content/browser/appcache/appcache_group.h" #include "content/browser/appcache/appcache_host.h" #include "content/browser/appcache/appcache_storage.h" #include "content/common/appcache_interfaces.h" #include "third_party/blink/public/mojom/appcache/appcache.mojom.h" #include "url/origin.h" namespace content { // static bool AppCache::CheckValidManifestScope(const GURL& manifest_url, const std::string& manifest_scope) { if (manifest_scope.empty()) return false; const GURL url = manifest_url.Resolve(manifest_scope); return url.is_valid() && !url.has_ref() && !url.has_query() && url.spec().back() == '/'; } // static std::string AppCache::GetManifestScope(const GURL& manifest_url, std::string optional_scope) { DCHECK(manifest_url.is_valid()); if (!optional_scope.empty()) { std::string scope = manifest_url.Resolve(optional_scope).path(); if (CheckValidManifestScope(manifest_url, scope)) { return optional_scope; } } // The default manifest scope is the path to the manifest URL's containing // directory. const GURL manifest_scope_url = manifest_url.GetWithoutFilename(); DCHECK(manifest_scope_url.is_valid()); DCHECK(CheckValidManifestScope(manifest_url, manifest_scope_url.path())); return manifest_scope_url.path(); } AppCache::AppCache(AppCacheStorage* storage, int64_t cache_id) : cache_id_(cache_id), owning_group_(nullptr), online_safelist_all_(false), is_complete_(false), cache_size_(0), padding_size_(0), manifest_parser_version_(-1), manifest_scope_(""), storage_(storage) { storage_->working_set()->AddCache(this); } AppCache::~AppCache() { DCHECK(associated_hosts_.empty()); if (owning_group_.get()) { DCHECK(is_complete_); owning_group_->RemoveCache(this); } DCHECK(!owning_group_.get()); storage_->working_set()->RemoveCache(this); } void AppCache::UnassociateHost(AppCacheHost* host) { associated_hosts_.erase(host); } void AppCache::AddEntry(const GURL& url, const AppCacheEntry& entry) { DCHECK(entries_.find(url) == entries_.end()); entries_.insert(EntryMap::value_type(url, entry)); cache_size_ += entry.response_size(); padding_size_ += entry.padding_size(); } bool AppCache::AddOrModifyEntry(const GURL& url, const AppCacheEntry& entry) { std::pair<EntryMap::iterator, bool> ret = entries_.insert(EntryMap::value_type(url, entry)); // Entry already exists. Merge the types and token expiration of the new and // existing entries. if (!ret.second) { ret.first->second.add_types(entry.types()); } else { cache_size_ += entry.response_size(); // New entry. Add to cache size. padding_size_ += entry.padding_size(); } return ret.second; } void AppCache::RemoveEntry(const GURL& url) { auto found = entries_.find(url); DCHECK(found != entries_.end()); DCHECK_GE(cache_size_, found->second.response_size()); DCHECK_GE(padding_size_, found->second.padding_size()); cache_size_ -= found->second.response_size(); padding_size_ -= found->second.padding_size(); entries_.erase(found); } AppCacheEntry* AppCache::GetEntry(const GURL& url) { auto it = entries_.find(url); return (it != entries_.end()) ? 
&(it->second) : nullptr; } const AppCacheEntry* AppCache::GetEntryAndUrlWithResponseId( int64_t response_id, GURL* optional_url_out) { for (const auto& pair : entries_) { if (pair.second.response_id() == response_id) { if (optional_url_out) *optional_url_out = pair.first; return &pair.second; } } return nullptr; } GURL AppCache::GetNamespaceEntryUrl( const std::vector<AppCacheNamespace>& namespaces, const GURL& namespace_url) const { size_t count = namespaces.size(); for (size_t i = 0; i < count; ++i) { if (namespaces[i].namespace_url == namespace_url) return namespaces[i].target_url; } NOTREACHED(); return GURL(); } namespace { bool SortNamespacesByLength( const AppCacheNamespace& lhs, const AppCacheNamespace& rhs) { return lhs.namespace_url.spec().length() > rhs.namespace_url.spec().length(); } } void AppCache::InitializeWithManifest(AppCacheManifest* manifest) { DCHECK(manifest); manifest_parser_version_ = manifest->parser_version; manifest_scope_ = manifest->scope; intercept_namespaces_.swap(manifest->intercept_namespaces); fallback_namespaces_.swap(manifest->fallback_namespaces); online_safelist_namespaces_.swap(manifest->online_safelist_namespaces); online_safelist_all_ = manifest->online_safelist_all; token_expires_ = manifest->token_expires; // Sort the namespaces by url string length, longest to shortest, // since longer matches trump when matching a url to a namespace. std::sort(intercept_namespaces_.begin(), intercept_namespaces_.end(), SortNamespacesByLength); std::sort(fallback_namespaces_.begin(), fallback_namespaces_.end(), SortNamespacesByLength); } void AppCache::InitializeWithDatabaseRecords( const AppCacheDatabase::CacheRecord& cache_record, const std::vector<AppCacheDatabase::EntryRecord>& entries, const std::vector<AppCacheDatabase::NamespaceRecord>& intercepts, const std::vector<AppCacheDatabase::NamespaceRecord>& fallbacks, const std::vector<AppCacheDatabase::OnlineSafeListRecord>& safelists) { DCHECK_EQ(cache_id_, cache_record.cache_id); manifest_parser_version_ = cache_record.manifest_parser_version; manifest_scope_ = cache_record.manifest_scope; online_safelist_all_ = cache_record.online_wildcard; update_time_ = cache_record.update_time; token_expires_ = cache_record.token_expires; for (const AppCacheDatabase::EntryRecord& entry : entries) { AddEntry(entry.url, AppCacheEntry(entry.flags, entry.response_id, entry.response_size, entry.padding_size)); } DCHECK_EQ(cache_size_, cache_record.cache_size); DCHECK_EQ(padding_size_, cache_record.padding_size); for (const auto& intercept : intercepts) intercept_namespaces_.push_back(intercept.namespace_); for (const auto& fallback : fallbacks) fallback_namespaces_.push_back(fallback.namespace_); // Sort the fallback namespaces by url string length, longest to shortest, // since longer matches trump when matching a url to a namespace. 
std::sort(intercept_namespaces_.begin(), intercept_namespaces_.end(), SortNamespacesByLength); std::sort(fallback_namespaces_.begin(), fallback_namespaces_.end(), SortNamespacesByLength); for (const auto& record : safelists) { online_safelist_namespaces_.emplace_back(APPCACHE_NETWORK_NAMESPACE, record.namespace_url, GURL()); } } void AppCache::ToDatabaseRecords( const AppCacheGroup* group, AppCacheDatabase::CacheRecord* cache_record, std::vector<AppCacheDatabase::EntryRecord>* entries, std::vector<AppCacheDatabase::NamespaceRecord>* intercepts, std::vector<AppCacheDatabase::NamespaceRecord>* fallbacks, std::vector<AppCacheDatabase::OnlineSafeListRecord>* safelists) { DCHECK(group && cache_record && entries && fallbacks && safelists); DCHECK(entries->empty() && fallbacks->empty() && safelists->empty()); cache_record->cache_id = cache_id_; cache_record->group_id = group->group_id(); cache_record->online_wildcard = online_safelist_all_; cache_record->update_time = update_time_; cache_record->cache_size = cache_size_; cache_record->padding_size = padding_size_; cache_record->manifest_parser_version = manifest_parser_version_; cache_record->manifest_scope = manifest_scope_; cache_record->token_expires = token_expires_; for (const auto& pair : entries_) { entries->push_back(AppCacheDatabase::EntryRecord()); AppCacheDatabase::EntryRecord& record = entries->back(); record.url = pair.first; record.cache_id = cache_id_; record.flags = pair.second.types(); record.response_id = pair.second.response_id(); record.response_size = pair.second.response_size(); record.padding_size = pair.second.padding_size(); } const url::Origin origin = url::Origin::Create(group->manifest_url()); for (const AppCacheNamespace& intercept_namespace : intercept_namespaces_) { intercepts->push_back(AppCacheDatabase::NamespaceRecord()); AppCacheDatabase::NamespaceRecord& record = intercepts->back(); record.cache_id = cache_id_; record.origin = origin; record.namespace_ = intercept_namespace; } for (const AppCacheNamespace& fallback_namespace : fallback_namespaces_) { fallbacks->push_back(AppCacheDatabase::NamespaceRecord()); AppCacheDatabase::NamespaceRecord& record = fallbacks->back(); record.cache_id = cache_id_; record.origin = origin; record.namespace_ = fallback_namespace; } for (const AppCacheNamespace& online_namespace : online_safelist_namespaces_) { safelists->push_back(AppCacheDatabase::OnlineSafeListRecord()); AppCacheDatabase::OnlineSafeListRecord& record = safelists->back(); record.cache_id = cache_id_; record.namespace_url = online_namespace.namespace_url; } } bool AppCache::FindResponseForRequest(const GURL& url, AppCacheEntry* found_entry, GURL* found_intercept_namespace, AppCacheEntry* found_fallback_entry, GURL* found_fallback_namespace, bool* found_network_namespace) { // Ignore fragments when looking up URL in the cache. 
GURL url_no_ref; if (url.has_ref()) { GURL::Replacements replacements; replacements.ClearRef(); url_no_ref = url.ReplaceComponents(replacements); } else { url_no_ref = url; } // 6.6.6 Changes to the networking model AppCacheEntry* entry = GetEntry(url_no_ref); if (entry) { *found_entry = *entry; return true; } *found_network_namespace = IsInNetworkNamespace(url_no_ref); if (*found_network_namespace) return true; const AppCacheNamespace* intercept_namespace = FindInterceptNamespace(url_no_ref); if (intercept_namespace) { entry = GetEntry(intercept_namespace->target_url); DCHECK(entry); *found_entry = *entry; *found_intercept_namespace = intercept_namespace->namespace_url; return true; } const AppCacheNamespace* fallback_namespace = FindFallbackNamespace(url_no_ref); if (fallback_namespace) { entry = GetEntry(fallback_namespace->target_url); DCHECK(entry); *found_fallback_entry = *entry; *found_fallback_namespace = fallback_namespace->namespace_url; return true; } *found_network_namespace = online_safelist_all_; return *found_network_namespace; } void AppCache::ToResourceInfoVector( std::vector<blink::mojom::AppCacheResourceInfo>* infos) const { DCHECK(infos && infos->empty()); for (const auto& pair : entries_) { infos->push_back(blink::mojom::AppCacheResourceInfo()); blink::mojom::AppCacheResourceInfo& info = infos->back(); info.url = pair.first; info.is_master = pair.second.IsMaster(); info.is_manifest = pair.second.IsManifest(); info.is_intercept = pair.second.IsIntercept(); info.is_fallback = pair.second.IsFallback(); info.is_foreign = pair.second.IsForeign(); info.is_explicit = pair.second.IsExplicit(); info.response_size = pair.second.response_size(); info.padding_size = pair.second.padding_size(); info.response_id = pair.second.response_id(); } } // static const AppCacheNamespace* AppCache::FindNamespace( const std::vector<AppCacheNamespace>& namespaces, const GURL& url) { size_t count = namespaces.size(); for (size_t i = 0; i < count; ++i) { if (namespaces[i].IsMatch(url)) return &namespaces[i]; } return nullptr; } } // namespace content
4,376
491
/**
 * This is to be included only from ragged_ops.h.
 *
 * Copyright      2020  Xiaomi Corporation (authors: <NAME>
 *                                                   <NAME>)
 *                      Mobvoi Inc.        (authors: <NAME>)
 *
 * See LICENSE for clarification regarding multiple authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef K2_CSRC_RAGGED_INL_H_
#define K2_CSRC_RAGGED_INL_H_

#ifndef IS_IN_K2_CSRC_RAGGED_H_
#error "this file is supposed to be included only by ragged_ops.h"
#endif

namespace k2 {

template <int MAX_LAYERS>
RowSplitsAccessor<MAX_LAYERS>::RowSplitsAccessor(RaggedShape &src) {
  int32_t num_layers = src.NumLayers();
  K2_CHECK_LE(src.NumLayers(), MAX_LAYERS);
  for (int i = 0; i < num_layers; i++)
    ptrs[i] = src.RowSplits(i + 1).Data();
}

template <int MAX_LAYERS>
RowIdsAccessor<MAX_LAYERS>::RowIdsAccessor(RaggedShape &src) {
  int32_t num_layers = src.NumLayers();
  K2_CHECK_LE(src.NumLayers(), MAX_LAYERS);
  for (int i = 0; i < num_layers; i++)
    ptrs[i] = src.RowIds(i + 1).Data();
}

}  // namespace k2

#endif  // K2_CSRC_RAGGED_INL_H_
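A brief, hypothetical usage sketch of the accessors defined above. RaggedShape and the ptrs member are declared in ragged_ops.h (not shown here), so the field names are carried over from this file on trust, and the MAX_LAYERS value of 4 is an arbitrary example.

// Sketch only: gather per-layer row_splits / row_ids pointers for use in device code.
void ExampleUse(k2::RaggedShape &shape) {
  // MAX_LAYERS must be >= shape.NumLayers(); the constructors check this.
  k2::RowSplitsAccessor<4> row_splits(shape);
  k2::RowIdsAccessor<4> row_ids(shape);
  // row_splits.ptrs[i] now points at the data of shape.RowSplits(i + 1),
  // ready to be captured by value in a device lambda.
  (void)row_splits;
  (void)row_ids;
}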
648
600
//----------------------------------------------------------------------------- // File: XBUtil.h // // Desc: Shortcut macros and helper functions for the XBox samples // // Hist: 11.01.00 - New for November XDK release // 12.01.00 - Moved input code to XBInput.cpp // 12.15.00 - Changes for December XDK release // // Copyright (c) Microsoft Corporation. All rights reserved. //----------------------------------------------------------------------------- #ifndef XBUTIL_H #define XBUTIL_H #include <xtl.h> #include <tchar.h> #include <assert.h> #ifdef __cplusplus extern "C" { #endif void writexbox( char *msg ); void sprintfx( const char *fmt, ... ); #ifdef __cplusplus } #endif //----------------------------------------------------------------------------- // Miscellaneous helper functions //----------------------------------------------------------------------------- // For deleting and releasing objects #define SAFE_DELETE(p) { delete (p); (p)=NULL; } #define SAFE_DELETE_ARRAY(p) { delete[] (p); (p)=NULL; } #define SAFE_RELEASE(p) { if(p) { (p)->Release(); (p)=NULL; } } #ifdef _DEBUG #define OUTPUT_DEBUG_STRING(s) OutputDebugStringA(s) //#define OUTPUT_DEBUG_STRING(s) writexbox(s) #else #define OUTPUT_DEBUG_STRING(s) (VOID)(s) #endif // For converting a FLOAT to a DWORD (useful for SetRenderState() calls) inline DWORD FtoDW( FLOAT f ) { return *((DWORD*)&f); } //----------------------------------------------------------------------------- // Name: XBUtil_SetMediaPath() and XBUtil_FindMediaFile() // Desc: Functions for setting a media path and returning a valid path to a // media file. //----------------------------------------------------------------------------- VOID XBUtil_SetMediaPath( const CHAR* strPath ); HRESULT XBUtil_FindMediaFile( CHAR* strPath, const CHAR* strFilename ); //----------------------------------------------------------------------------- // Name: XBox specifc counters // Desc: * The CPU runs at 733MHz, therefore // Time in micro seconds = ticks / 733MHz = ticks * 3/2200 // * Using DOUBLE to maintain percision // * See "A Note On Timers" whitepaper //----------------------------------------------------------------------------- __forceinline __int64 GetMachineTime() { __asm rdtsc } __forceinline __int64 GetTimeInMicroSeconds() { return GetMachineTime()*3/2200;} __forceinline DOUBLE GetTimeInSeconds() { return GetTimeInMicroSeconds() / 1000000.0;} //----------------------------------------------------------------------------- // Name: XBUtil_Timer() // Desc: Performs timer operations. Use the following commands: // TIMER_RESET - to reset the timer // TIMER_START - to start the timer // TIMER_STOP - to stop (or pause) the timer // TIMER_ADVANCE - to advance the timer by 0.1 seconds // TIMER_RETRACT - to retract the timer by 0.1 seconds // TIMER_GETABSOLUTETIME - to get the absolute system time // TIMER_GETAPPTIME - to get the current time //----------------------------------------------------------------------------- enum TIMER_COMMAND { TIMER_RESET, TIMER_START, TIMER_STOP, TIMER_ADVANCE, TIMER_RETRACT, TIMER_GETABSOLUTETIME, TIMER_GETAPPTIME }; FLOAT XBUtil_Timer( TIMER_COMMAND command ); //----------------------------------------------------------------------------- // Name: XBUtil_InitMaterial() // Desc: Initializes a D3DMATERIAL8 structure, setting the diffuse and ambient // colors. It does not set emissive or specular colors. 
//----------------------------------------------------------------------------- VOID XBUtil_InitMaterial( D3DMATERIAL8& mtrl, FLOAT r=0.0f, FLOAT g=0.0f, FLOAT b=0.0f, FLOAT a=1.0f ); //----------------------------------------------------------------------------- // Name: XBUtil_InitLight() // Desc: Initializes a D3DLIGHT structure, setting the light position. The // diffuse color is set to white, specular and ambient left as black. //----------------------------------------------------------------------------- VOID XBUtil_InitLight( D3DLIGHT8& light, D3DLIGHTTYPE ltType, FLOAT x=0.0f, FLOAT y=0.0f, FLOAT z=0.0f ); //----------------------------------------------------------------------------- // Name: XBUtil_CreateTexture() // Desc: Helper function to create a texture. //----------------------------------------------------------------------------- HRESULT XBUtil_CreateTexture( LPDIRECT3DDEVICE8 pd3dDevice, const CHAR* strTexture, LPDIRECT3DTEXTURE8* ppTexture, D3DFORMAT d3dFormat = D3DFMT_UNKNOWN ); //----------------------------------------------------------------------------- // Name: XBUtil_UnswizzleTexture() / XBUtil_SwizzleTexture() // Desc: Unswizzles / swizzles a texture before it gets unlocked. Note: this // operation is typically very slow. //----------------------------------------------------------------------------- VOID XBUtil_UnswizzleTexture2D( D3DLOCKED_RECT* pLock, const D3DSURFACE_DESC* pDesc ); VOID XBUtil_UnswizzleTexture3D( D3DLOCKED_BOX* pLock, const D3DVOLUME_DESC* pDesc ); VOID XBUtil_SwizzleTexture2D( D3DLOCKED_RECT* pLock, const D3DSURFACE_DESC* pDesc ); VOID XBUtil_SwizzleTexture3D( D3DLOCKED_BOX* pLock, const D3DVOLUME_DESC* pDesc ); //----------------------------------------------------------------------------- // Name: XBUtil_CreateVertexShader() // Desc: Creates a file-based vertex shader //----------------------------------------------------------------------------- HRESULT XBUtil_CreateVertexShader( LPDIRECT3DDEVICE8 pd3dDevice, const CHAR* strFilename, const DWORD* pdwVertexDecl, DWORD* pdwVertexShader ); //----------------------------------------------------------------------------- // Name: XBUtil_CreatePixelShader() // Desc: Creates a file-based pixel shader //----------------------------------------------------------------------------- HRESULT XBUtil_CreatePixelShader( LPDIRECT3DDEVICE8 pd3dDevice, const CHAR* strFilename, DWORD* pdwPixelShader ); //----------------------------------------------------------------------------- // Name: XBUtil_VectorToRGBA() // Desc: Converts a normal into an RGBA vector. //----------------------------------------------------------------------------- inline D3DCOLOR XBUtil_VectorToRGBA( const D3DXVECTOR3* v, FLOAT fHeight = 1.0f ) { D3DCOLOR r = (D3DCOLOR)( ( v->x + 1.0f ) * 127.5f ); D3DCOLOR g = (D3DCOLOR)( ( v->y + 1.0f ) * 127.5f ); D3DCOLOR b = (D3DCOLOR)( ( v->z + 1.0f ) * 127.5f ); D3DCOLOR a = (D3DCOLOR)( 255.0f * fHeight ); return( (a<<24L) + (r<<16L) + (g<<8L) + (b<<0L) ); } //----------------------------------------------------------------------------- // Name: XBUtil_GetCubeMapViewMatrix() // Desc: Returns a view matrix for rendering to a face of a cube map. //----------------------------------------------------------------------------- D3DXMATRIX XBUtil_GetCubeMapViewMatrix( DWORD dwFace ); //----------------------------------------------------------------------------- // Name: XBUtil_CreateNormalizationCubeMap() // Desc: Creates a cube map and fills it with normalized RGBA vectors. 
//----------------------------------------------------------------------------- HRESULT XBUtil_CreateNormalizationCubeMap( LPDIRECT3DDEVICE8 pd3dDevice, DWORD dwSize, LPDIRECT3DCUBETEXTURE8* ppCubeMap ); //----------------------------------------------------------------------------- // Name: XBUtil_DumpSurface() // Desc: Writes the contents of a surface (32-bit only) to a .tga file. This // could be a back buffer, texture, or any other 32-bit surface. //----------------------------------------------------------------------------- HRESULT XBUtil_DumpSurface( LPDIRECT3DSURFACE8 pSurface, const CHAR* strFileName, BOOL bSurfaceIsTiled = FALSE ); //----------------------------------------------------------------------------- // Name: XBUtil_EvaluateHermite() // Desc: Evaluate a cubic parametric equation. Returns the point at u on a // Hermite curve. //----------------------------------------------------------------------------- D3DXVECTOR3 XBUtil_EvaluateHermite( const D3DXVECTOR3& p0, const D3DXVECTOR3& p1, const D3DXVECTOR3& v0, const D3DXVECTOR3& v1, FLOAT u ); //----------------------------------------------------------------------------- // Name: XBUtil_EvaluateCatmullRom() // Desc: Evaluate a cubic parametric equation. Returns the point at u on a // Catmull-Rom curve. //----------------------------------------------------------------------------- D3DXVECTOR3 XBUtil_EvaluateCatmullRom( const D3DXVECTOR3& p1, const D3DXVECTOR3& p2, const D3DXVECTOR3& p3, const D3DXVECTOR3& p4, FLOAT u ); //----------------------------------------------------------------------------- // Name: XBUtil_GetSplinePoint() // Desc: Returns a point on a spline. The spline is defined by an array of // points, and the point and tangent returned are located at position t // on the spline, where 0 < t < dwNumSpinePts. //----------------------------------------------------------------------------- VOID XBUtil_GetSplinePoint( const D3DXVECTOR3* pSpline, DWORD dwNumSpinePts, FLOAT t, D3DXVECTOR3* pvPoint, D3DXVECTOR3* pvTangent ); //----------------------------------------------------------------------------- // Name: XBUtil_RenderSpline() // Desc: For debugging purposes, visually renders a spline. //----------------------------------------------------------------------------- VOID XBUtil_RenderSpline( LPDIRECT3DDEVICE8 pd3dDevice, const D3DXVECTOR3* pSpline, DWORD dwNumSplinePts, DWORD dwColor, BOOL bRenderAxes ); //----------------------------------------------------------------------------- // Name: XBUtil_DeclaratorFromFVF() // Desc: Create a vertex declaration from an FVF. Registers are assigned as // follows: // v0 = Vertex position // v1 = Vertex blend weights // v2 = Vertex normal // v3 = Vertex pointsize // v4 = Vertex diffuse color // v5 = Vertex specular color // v6-v9 = Vertex texture coords //----------------------------------------------------------------------------- HRESULT XBUtil_DeclaratorFromFVF( DWORD dwFVF, DWORD Declaration[MAX_FVF_DECL_SIZE] ); //----------------------------------------------------------------------------- // Name: XBUtil_GetWide() // Desc: Convert CHAR string to WCHAR string. dwMax includes the null byte. // Never copies more than dwMax-1 characters into strWide. // Ex: GetWide( "abc", strWide, 3 ) gives strWide = "ab" // Typical usage: // WCHAR strResult[MAX]; // XBUtil_GetWide( strThin, strResult, MAX ); //----------------------------------------------------------------------------- VOID XBUtil_GetWide( const CHAR* strThin, WCHAR* strWide, DWORD dwMax ); #endif // XBUTIL_H
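As a quick illustration of one of the helpers above, the fragment below packs a surface normal with XBUtil_VectorToRGBA. It is a sketch only, assuming the same D3DX types the header already relies on; the function and variable names are invented for the example.

// Sketch only: pack an up-pointing normal into an RGBA colour value.
VOID ExamplePackNormal()
{
    D3DXVECTOR3 vNormal( 0.0f, 1.0f, 0.0f );
    D3DCOLOR dwPacked = XBUtil_VectorToRGBA( &vNormal, 1.0f );
    // dwPacked now holds each component biased from [-1,1] into 0..255,
    // with the height factor stored in the alpha channel.
    (void)dwPacked;
}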
3,496
1,144
package de.metas.handlingunits.attribute.strategy.impl;

/*
 * #%L
 * de.metas.handlingunits.base
 * %%
 * Copyright (C) 2015 metas GmbH
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program. If not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>.
 * #L%
 */

import java.math.BigDecimal;

import org.adempiere.exceptions.AdempiereException;
import org.compiere.model.I_M_Attribute;

import de.metas.handlingunits.attribute.strategy.IAttributeAggregationStrategy;

public class SumAggregationStrategy implements IAttributeAggregationStrategy
{
	/**
	 * Adds the given <code>valueDelta</code> to the given <code>valueOld</code>. <code>null</code> values are allowed and will be interpreted as {@link BigDecimal#ZERO}. The given
	 * <code>attribute</code> parameter is ignored.
	 */
	@Override
	public Object aggregate(final I_M_Attribute ignored, final Object valueOld, final Object valueDelta)
	{
		final BigDecimal valueOldBD = coerceToBigDecimal(valueOld);
		final BigDecimal valueDeltaBD = coerceToBigDecimal(valueDelta);

		final BigDecimal valueNewBD = valueOldBD.add(valueDeltaBD);
		return valueNewBD;
	}

	private BigDecimal coerceToBigDecimal(final Object value)
	{
		final BigDecimal convertedValue;
		if (value == null)
		{
			convertedValue = BigDecimal.ZERO;
		}
		else
		{
			try
			{
				convertedValue = new BigDecimal(value.toString());
			}
			catch (final Exception e)
			{
				throw new AdempiereException("Could not create BigDecimal from object: " + value, e);
			}
		}
		return convertedValue;
	}
}
669
320
<gh_stars>100-1000
#include "util.h"

std::string version_to_str(DWORD version)
{
	BYTE *chunks = (BYTE*)&version;
	std::stringstream stream;
	stream << std::hex
		<< (int)chunks[3] << "."
		<< (int)chunks[2] << "."
		<< (int)chunks[1] << "."
		<< (int)chunks[0];
	return stream.str();
}

std::string make_dir_name(std::string baseDir, time_t timestamp)
{
	std::stringstream stream;
	if (baseDir.length() > 0) {
		stream << baseDir;
		stream << "\\";
	}
	stream << "scan_";
	stream << timestamp;
	return stream.str();
}

bool set_output_dir(pesieve::t_params &args, const char *new_dir)
{
	if (!new_dir) return false;

	size_t new_len = strlen(new_dir);
	size_t buffer_len = sizeof(args.output_dir);
	// require room for the terminating null byte as well
	if (new_len >= buffer_len) return false;

	memset(args.output_dir, 0, buffer_len);
	memcpy(args.output_dir, new_dir, new_len);
	return true;
}

char* get_file_name(char *full_path)
{
	if (!full_path) return nullptr;

	size_t len = strlen(full_path);
	if (len < 2) {
		return full_path;
	}
	for (size_t i = len - 2; i > 0; i--) {
		if (full_path[i] == '\\' || full_path[i] == '/') {
			return full_path + (i + 1);
		}
	}
	return full_path;
}
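A small illustrative driver for the helpers above (not part of pe-sieve). The paths and version value are invented, and it assumes util.h already pulls in the Windows, <string> and <sstream> headers these functions need.

// Sketch only: exercise the helper functions defined above.
#include <iostream>
#include <ctime>

int main() {
	DWORD version = 0x01020304;  // encodes 1.2.3.4, one byte per component
	std::cout << version_to_str(version) << "\n";                  // prints "1.2.3.4"
	std::cout << make_dir_name("C:\\dumps", time(NULL)) << "\n";   // "C:\dumps\scan_<timestamp>"

	char path[] = "C:\\tools\\pe-sieve64.exe";
	std::cout << get_file_name(path) << "\n";                      // "pe-sieve64.exe"
	return 0;
}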
589
2,151
// Copyright (c) 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_TEST_CHROMEDRIVER_LOGGING_H_ #define CHROME_TEST_CHROMEDRIVER_LOGGING_H_ #include <memory> #include <string> #include <vector> #include "base/containers/circular_deque.h" #include "base/macros.h" #include "base/values.h" #include "chrome/test/chromedriver/chrome/log.h" struct Capabilities; class CommandListener; class DevToolsEventListener; class ListValue; struct Session; class Status; namespace internal { static const size_t kMaxReturnedEntries = 100000; } // namespace internal // Accumulates WebDriver Logging API entries of a given type and minimum level. // See https://code.google.com/p/selenium/wiki/Logging. class WebDriverLog : public Log { public: static const char kBrowserType[]; static const char kDriverType[]; static const char kPerformanceType[]; static const char kDevToolsType[]; // Converts WD wire protocol level name -> Level, false on bad name. static bool NameToLevel(const std::string& name, Level* out_level); // Creates a WebDriverLog with the given type and minimum level. WebDriverLog(const std::string& type, Level min_level); ~WebDriverLog() override; // Returns entries accumulated so far, as a ListValue ready for serialization // into the wire protocol response to the "/log" command. // The caller assumes ownership of the ListValue, and the WebDriverLog // creates and owns a new empty ListValue for further accumulation. std::unique_ptr<base::ListValue> GetAndClearEntries(); // Finds the first error message in the log and returns it. If none exist, // returns an empty string. Does not clear entries. std::string GetFirstErrorMessage() const; // Translates a Log entry level into a WebDriver level and stores the entry. void AddEntryTimestamped(const base::Time& timestamp, Level level, const std::string& source, const std::string& message) override; // Whether or not batches_of_entries_ is empty when it is being emptied. bool Emptied() const override; const std::string& type() const; void set_min_level(Level min_level); Level min_level() const; private: const std::string type_; // WebDriver log type. Level min_level_; // Minimum level of entries to store. // Log is empty when it is emptied, or when it is initialized (because we // want GetLog to collect trace events initially). bool emptied_; // A queue of batches of entries. Each batch can have no more than // |kMaxReturnedEntries| values in it. This is to avoid HTTP response buffer // overflow (crbug.com/681892). base::circular_deque<std::unique_ptr<base::ListValue>> batches_of_entries_; DISALLOW_COPY_AND_ASSIGN(WebDriverLog); }; // Initializes logging system for ChromeDriver. Returns true on success. bool InitLogging(); // Creates |Log|s, |DevToolsEventListener|s, and |CommandListener|s based on // logging preferences. Status CreateLogs( const Capabilities& capabilities, const Session* session, std::vector<std::unique_ptr<WebDriverLog>>* out_logs, std::vector<std::unique_ptr<DevToolsEventListener>>* out_devtools_listeners, std::vector<std::unique_ptr<CommandListener>>* out_command_listeners); #endif // CHROME_TEST_CHROMEDRIVER_LOGGING_H_
1,085
3,494
/* Copyright (c) 2010, The Cinder Project (http://libcinder.org) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #include "cinder/Cinder.h" #include "cinder/Url.h" #if defined( __OBJC__ ) @class NSThread; @class IStreamUrlImplCocoaDelegate; #else class NSThread; class IStreamUrlImplCocoaDelegate; #endif namespace cinder { class IStreamUrlImplCocoa : public IStreamUrlImpl { public: IStreamUrlImplCocoa( const std::string &url, const std::string &user, const std::string &password, const UrlOptions &options ); ~IStreamUrlImplCocoa(); virtual size_t readDataAvailable( void *dest, size_t maxSize ); virtual void seekAbsolute( off_t absoluteOffset ); virtual void seekRelative( off_t relativeOffset ); virtual off_t tell() const; virtual off_t size() const; virtual bool isEof() const; virtual void IORead( void *t, size_t size ); private: void fillBuffer( int wantBytes ) const; NSThread *mThread; IStreamUrlImplCocoaDelegate *mDelegate; }; } // namespace cinder
702
640
<gh_stars>100-1000 /*-------------------------------------------- 《加密与解密(第四版)》 (c) 看雪学院 www.kanxue.com 2000-2018 ----------------------------------------------*/ //mod1.cpp #include "stdafx.h" int _tmain(int argc, _TCHAR* argv[]) { long long nNum; scanf("%lld", &nNum); printf("%d\r\n", argc % 3); printf("%lld\r\n", nNum % 10); return 0; }
176
335
{ "word": "Misspelling", "definitions": [ "An incorrect spelling of a word." ], "parts-of-speech": "Noun" }
59
1,664
<reponame>likenamehaojie/Apache-Ambari-ZH /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.oozie.ambari.view.assets; import org.apache.ambari.view.DataStore; import org.apache.ambari.view.PersistenceException; import org.apache.oozie.ambari.view.assets.model.ActionAsset; import org.apache.oozie.ambari.view.repo.BaseRepo; import java.util.Collection; public class AssetRepo extends BaseRepo<ActionAsset> { public AssetRepo(DataStore dataStore) { super(ActionAsset.class, dataStore); } public Collection<ActionAsset> getMyAsets(String userName) { try { return dataStore.findAll(ActionAsset.class, " owner='" + userName + "'"); } catch (PersistenceException e) { throw new RuntimeException(e); } } public boolean assetNameAvailable(String name) { try { Collection<ActionAsset> assets=dataStore.findAll(ActionAsset.class, " name='" + name + "'"); boolean assetExists= assets!=null && !assets.isEmpty(); return !assetExists; } catch (PersistenceException e) { throw new RuntimeException(e); } } }
568
1,812
<reponame>1006079161/qpdf #ifndef PL_AES_PDF_HH #define PL_AES_PDF_HH #include <qpdf/Pipeline.hh> #include <qpdf/QPDFCryptoImpl.hh> #include <memory> // This pipeline implements AES-128 and AES-256 with CBC and block // padding as specified in the PDF specification. class Pl_AES_PDF: public Pipeline { public: QPDF_DLL // key should be a pointer to key_bytes bytes of data Pl_AES_PDF(char const* identifier, Pipeline* next, bool encrypt, unsigned char const* key, size_t key_bytes); QPDF_DLL virtual ~Pl_AES_PDF(); QPDF_DLL virtual void write(unsigned char* data, size_t len); QPDF_DLL virtual void finish(); // Use zero initialization vector; needed for AESV3 QPDF_DLL void useZeroIV(); // Disable padding; needed for AESV3 QPDF_DLL void disablePadding(); // Specify an initialization vector, which will not be included in // the output. QPDF_DLL void setIV(unsigned char const* iv, size_t bytes); // For testing only; PDF always uses CBC QPDF_DLL void disableCBC(); // For testing only: use a fixed initialization vector for CBC QPDF_DLL static void useStaticIV(); private: void flush(bool discard_padding); void initializeVector(); static unsigned int const buf_size = QPDFCryptoImpl::rijndael_buf_size; static bool use_static_iv; std::shared_ptr<QPDFCryptoImpl> crypto; bool encrypt; bool cbc_mode; bool first; size_t offset; // offset into memory buffer std::unique_ptr<unsigned char[]> key; size_t key_bytes; unsigned char inbuf[buf_size]; unsigned char outbuf[buf_size]; unsigned char cbc_block[buf_size]; unsigned char specified_iv[buf_size]; bool use_zero_iv; bool use_specified_iv; bool disable_padding; }; #endif // PL_AES_PDF_HH
713
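The Pl_AES_PDF header above is a qpdf pipeline: bytes written to it are encrypted (or decrypted) in CBC mode and pushed to the next pipeline in the chain. A minimal encryption sketch, assuming qpdf's Pl_StdioFile as the downstream pipeline and an illustrative all-zero 16-byte key (which selects AES-128 per the class comment); the include paths are assumptions.

#include <qpdf/Pl_AES_PDF.hh>
#include <qpdf/Pl_StdioFile.hh>
#include <cstdio>
#include <string>

void sketch_aes_pipeline() {
    unsigned char key[16] = {0};                         // illustrative key material only
    Pl_StdioFile out("stdout", stdout);                  // assumed downstream pipeline
    Pl_AES_PDF aes("aes encrypt", &out, true /* encrypt */, key, sizeof(key));
    std::string plain = "hello";
    aes.write(reinterpret_cast<unsigned char*>(&plain[0]), plain.size());
    aes.finish();                                        // flushes the final padded block downstream
}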
605
// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsyntax-only -verify -fexceptions -fcxx-exceptions %s -std=c++14 // expected-error@+2 {{attribute 'target_clones' multiversioned functions do not yet support function templates}} template<typename T, typename U> int __attribute__((target_clones("sse4.2", "default"))) foo(){ return 1;} void uses_lambda() { // expected-error@+1 {{attribute 'target_clones' multiversioned functions do not yet support lambdas}} auto x = []()__attribute__((target_clones("sse4.2", "arch=ivybridge", "default"))) {}; x(); }
193
1,078
<reponame>anael-seghezzi/CToy #ifndef AL_UINTMAP_H #define AL_UINTMAP_H #include "AL/al.h" #include "rwlock.h" typedef struct UIntMap { struct { ALuint key; ALvoid *value; } *array; ALsizei size; ALsizei maxsize; ALsizei limit; RWLock lock; } UIntMap; extern UIntMap TlsDestructor; void InitUIntMap(UIntMap *map, ALsizei limit); void ResetUIntMap(UIntMap *map); ALenum InsertUIntMapEntry(UIntMap *map, ALuint key, ALvoid *value); ALvoid *RemoveUIntMapKey(UIntMap *map, ALuint key); ALvoid *LookupUIntMapKey(UIntMap *map, ALuint key); inline void LockUIntMapRead(UIntMap *map) { ReadLock(&map->lock); } inline void UnlockUIntMapRead(UIntMap *map) { ReadUnlock(&map->lock); } inline void LockUIntMapWrite(UIntMap *map) { WriteLock(&map->lock); } inline void UnlockUIntMapWrite(UIntMap *map) { WriteUnlock(&map->lock); } #endif /* AL_UINTMAP_H */
374
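The UIntMap header above pairs a sorted key/value array with a reader-writer lock. A short sketch of the declared API, inserting, looking up, and removing one entry; the include path and the limit value are illustrative assumptions.

#include "uintmap.h"   // assumed path for the header shown above
#include <limits.h>

static int resource;    // stand-in object to store in the map

void sketch_uintmap() {
    UIntMap map;
    InitUIntMap(&map, INT_MAX);                // effectively no entry limit
    InsertUIntMapEntry(&map, 42u, &resource);  // keyed by an ALuint id

    LockUIntMapRead(&map);                     // the inline helpers wrap the embedded RWLock
    void *found = LookupUIntMapKey(&map, 42u); // returns the stored pointer, or NULL if absent
    UnlockUIntMapRead(&map);

    RemoveUIntMapKey(&map, 42u);
    ResetUIntMap(&map);                        // releases the backing storage
    (void)found;
}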
4,095
<gh_stars>1000+ /** * Core package for Redis Command Interface support through {@link io.lettuce.core.dynamic.RedisCommandFactory}. */ package io.lettuce.core.dynamic;
53
530
/******************************************************************************* * Copyright 2014 <NAME>. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.bladecoder.engineeditor.ui.panels; import com.badlogic.gdx.scenes.scene2d.InputEvent; import com.badlogic.gdx.scenes.scene2d.ui.ButtonGroup; import com.badlogic.gdx.scenes.scene2d.ui.HorizontalGroup; import com.badlogic.gdx.scenes.scene2d.ui.Label; import com.badlogic.gdx.scenes.scene2d.ui.Skin; import com.badlogic.gdx.scenes.scene2d.ui.Table; import com.badlogic.gdx.scenes.scene2d.ui.TextButton; import com.badlogic.gdx.scenes.scene2d.utils.ClickListener; import com.badlogic.gdx.utils.Align; import com.badlogic.gdx.utils.Array; abstract public class ScopePanel extends Table { public static String WORLD_SCOPE = "World"; public static String SCENE_SCOPE = "Scene"; public static String ACTOR_SCOPE = "Actor"; private ButtonGroup<TextButton> buttonGroup; private HorizontalGroup hPanel; private Skin skin; public ScopePanel(Skin skin) { super(skin); this.skin = skin; buttonGroup = new ButtonGroup<TextButton>(); hPanel = new HorizontalGroup(); hPanel.wrap(true); hPanel.rowAlign(Align.left); buttonGroup.setMaxCheckCount(1); buttonGroup.setMinCheckCount(1); buttonGroup.setUncheckLast(true); hPanel.addActor(new Label("Scope: ", skin)); addButton(WORLD_SCOPE); addButton(SCENE_SCOPE); addButton(ACTOR_SCOPE); add(hPanel).expandX().fillX().center(); buttonGroup.getButtons().get(2).setChecked(true); } private void addButton(String name) { TextButton button = new TextButton(name, skin); buttonGroup.add(button); hPanel.addActor(button); button.addListener(new ClickListener() { @Override public void clicked (InputEvent event, float x, float y) { changeScope((TextButton)event.getListenerActor()); } }); } public int getSelectedIndex() { for(int i=0; i < buttonGroup.getButtons().size; i++) { if(buttonGroup.getButtons().get(i) == buttonGroup.getChecked()) return i; } return -1; } public void changeScope(TextButton b) { b.setChecked(true); scopeChanged(b.getText().toString()); } abstract public void scopeChanged(String scope); public String getScope() { return buttonGroup.getChecked().getText().toString(); } public void clear() { Array<TextButton> buttons = buttonGroup.getButtons(); buttons.clear(); hPanel.clear(); } }
1,040
1,821
<filename>util-benchmark/src/main/scala/com/twitter/json/TestFormat.java package com.twitter.json; public enum TestFormat { json, csv, tsv; }
57
1,350
<reponame>Shashi-rk/azure-sdk-for-java // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.datafactory.models; import com.azure.core.annotation.Fluent; import com.azure.core.util.logging.ClientLogger; import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import java.time.OffsetDateTime; import java.util.HashMap; import java.util.Map; /** Trigger runs. */ @Fluent public final class TriggerRun { @JsonIgnore private final ClientLogger logger = new ClientLogger(TriggerRun.class); /* * Trigger run id. */ @JsonProperty(value = "triggerRunId", access = JsonProperty.Access.WRITE_ONLY) private String triggerRunId; /* * Trigger name. */ @JsonProperty(value = "triggerName", access = JsonProperty.Access.WRITE_ONLY) private String triggerName; /* * Trigger type. */ @JsonProperty(value = "triggerType", access = JsonProperty.Access.WRITE_ONLY) private String triggerType; /* * Trigger run start time. */ @JsonProperty(value = "triggerRunTimestamp", access = JsonProperty.Access.WRITE_ONLY) private OffsetDateTime triggerRunTimestamp; /* * Trigger run status. */ @JsonProperty(value = "status", access = JsonProperty.Access.WRITE_ONLY) private TriggerRunStatus status; /* * Trigger error message. */ @JsonProperty(value = "message", access = JsonProperty.Access.WRITE_ONLY) private String message; /* * List of property name and value related to trigger run. Name, value pair * depends on type of trigger. */ @JsonProperty(value = "properties", access = JsonProperty.Access.WRITE_ONLY) @JsonInclude(value = JsonInclude.Include.NON_NULL, content = JsonInclude.Include.ALWAYS) private Map<String, String> properties; /* * List of pipeline name and run Id triggered by the trigger run. */ @JsonProperty(value = "triggeredPipelines", access = JsonProperty.Access.WRITE_ONLY) @JsonInclude(value = JsonInclude.Include.NON_NULL, content = JsonInclude.Include.ALWAYS) private Map<String, String> triggeredPipelines; /* * Run dimension for which trigger was fired. */ @JsonProperty(value = "runDimension", access = JsonProperty.Access.WRITE_ONLY) @JsonInclude(value = JsonInclude.Include.NON_NULL, content = JsonInclude.Include.ALWAYS) private Map<String, String> runDimension; /* * Status of the upstream pipelines. */ @JsonProperty(value = "dependencyStatus", access = JsonProperty.Access.WRITE_ONLY) @JsonInclude(value = JsonInclude.Include.NON_NULL, content = JsonInclude.Include.ALWAYS) private Map<String, Object> dependencyStatus; /* * Trigger runs. */ @JsonIgnore private Map<String, Object> additionalProperties; /** * Get the triggerRunId property: Trigger run id. * * @return the triggerRunId value. */ public String triggerRunId() { return this.triggerRunId; } /** * Get the triggerName property: Trigger name. * * @return the triggerName value. */ public String triggerName() { return this.triggerName; } /** * Get the triggerType property: Trigger type. * * @return the triggerType value. */ public String triggerType() { return this.triggerType; } /** * Get the triggerRunTimestamp property: Trigger run start time. * * @return the triggerRunTimestamp value. */ public OffsetDateTime triggerRunTimestamp() { return this.triggerRunTimestamp; } /** * Get the status property: Trigger run status. 
* * @return the status value. */ public TriggerRunStatus status() { return this.status; } /** * Get the message property: Trigger error message. * * @return the message value. */ public String message() { return this.message; } /** * Get the properties property: List of property name and value related to trigger run. Name, value pair depends on * type of trigger. * * @return the properties value. */ public Map<String, String> properties() { return this.properties; } /** * Get the triggeredPipelines property: List of pipeline name and run Id triggered by the trigger run. * * @return the triggeredPipelines value. */ public Map<String, String> triggeredPipelines() { return this.triggeredPipelines; } /** * Get the runDimension property: Run dimension for which trigger was fired. * * @return the runDimension value. */ public Map<String, String> runDimension() { return this.runDimension; } /** * Get the dependencyStatus property: Status of the upstream pipelines. * * @return the dependencyStatus value. */ public Map<String, Object> dependencyStatus() { return this.dependencyStatus; } /** * Get the additionalProperties property: Trigger runs. * * @return the additionalProperties value. */ @JsonAnyGetter public Map<String, Object> additionalProperties() { return this.additionalProperties; } /** * Set the additionalProperties property: Trigger runs. * * @param additionalProperties the additionalProperties value to set. * @return the TriggerRun object itself. */ public TriggerRun withAdditionalProperties(Map<String, Object> additionalProperties) { this.additionalProperties = additionalProperties; return this; } @JsonAnySetter void withAdditionalProperties(String key, Object value) { if (additionalProperties == null) { additionalProperties = new HashMap<>(); } additionalProperties.put(key, value); } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ public void validate() { } }
2,336
1,144
/****************************************************************************** * Product: Adempiere ERP & CRM Smart Business Solution * * Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved. * * This program is free software, you can redistribute it and/or modify it * * under the terms version 2 of the GNU General Public License as published * * by the Free Software Foundation. This program is distributed in the hope * * that it will be useful, but WITHOUT ANY WARRANTY, without even the implied * * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * * with this program, if not, write to the Free Software Foundation, Inc., * * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * For the text or an alternative of this public license, you may reach us * * ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA * * or via <EMAIL> or http://www.compiere.org/license.html * *****************************************************************************/ /** Generated Model - DO NOT CHANGE */ package org.compiere.model; import java.math.BigDecimal; import java.sql.ResultSet; import java.sql.Timestamp; import java.util.Properties; import org.compiere.util.Env; import org.compiere.util.KeyNamePair; /** Generated Model for I_FAJournal * @author Adempiere (generated) * @version Release 3.5.4a - $Id$ */ public class X_I_FAJournal extends PO implements I_I_FAJournal, I_Persistent { /** * */ private static final long serialVersionUID = 20090915L; /** Standard Constructor */ public X_I_FAJournal (Properties ctx, int I_FAJournal_ID, String trxName) { super (ctx, I_FAJournal_ID, trxName); /** if (I_FAJournal_ID == 0) { setI_FAJournal_ID (0); setI_IsImported (false); } */ } /** Load Constructor */ public X_I_FAJournal (Properties ctx, ResultSet rs, String trxName) { super (ctx, rs, trxName); } /** AccessLevel * @return 7 - System - Client - Org */ protected int get_AccessLevel() { return accessLevel.intValue(); } /** Load Meta Data */ protected POInfo initPO (Properties ctx) { POInfo poi = POInfo.getPOInfo (ctx, Table_ID, get_TrxName()); return poi; } public String toString() { StringBuffer sb = new StringBuffer ("X_I_FAJournal[") .append(get_ID()).append("]"); return sb.toString(); } /** Set Asset. @param A_Asset_ID Asset used internally or by customers */ public void setA_Asset_ID (int A_Asset_ID) { if (A_Asset_ID < 1) set_Value (COLUMNNAME_A_Asset_ID, null); else set_Value (COLUMNNAME_A_Asset_ID, Integer.valueOf(A_Asset_ID)); } /** Get Asset. @return Asset used internally or by customers */ public int getA_Asset_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_A_Asset_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_ElementValue getAccount() throws RuntimeException { return (I_C_ElementValue)MTable.get(getCtx(), I_C_ElementValue.Table_Name) .getPO(getAccount_ID(), get_TrxName()); } /** Set Account. @param Account_ID Account used */ public void setAccount_ID (int Account_ID) { if (Account_ID < 1) set_Value (COLUMNNAME_Account_ID, null); else set_Value (COLUMNNAME_Account_ID, Integer.valueOf(Account_ID)); } /** Get Account. @return Account used */ public int getAccount_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_Account_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Account Key. 
@param AccountValue Key of Account Element */ public void setAccountValue (String AccountValue) { set_Value (COLUMNNAME_AccountValue, AccountValue); } /** Get Account Key. @return Key of Account Element */ public String getAccountValue () { return (String)get_Value(COLUMNNAME_AccountValue); } /** Set Account Schema Name. @param AcctSchemaName Name of the Accounting Schema */ public void setAcctSchemaName (String AcctSchemaName) { set_Value (COLUMNNAME_AcctSchemaName, AcctSchemaName); } /** Get Account Schema Name. @return Name of the Accounting Schema */ public String getAcctSchemaName () { return (String)get_Value(COLUMNNAME_AcctSchemaName); } /** Set Document Org. @param AD_OrgDoc_ID Document Organization (independent from account organization) */ public void setAD_OrgDoc_ID (int AD_OrgDoc_ID) { if (AD_OrgDoc_ID < 1) set_Value (COLUMNNAME_AD_OrgDoc_ID, null); else set_Value (COLUMNNAME_AD_OrgDoc_ID, Integer.valueOf(AD_OrgDoc_ID)); } /** Get Document Org. @return Document Organization (independent from account organization) */ public int getAD_OrgDoc_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_AD_OrgDoc_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Trx Organization. @param AD_OrgTrx_ID Performing or initiating organization */ public void setAD_OrgTrx_ID (int AD_OrgTrx_ID) { if (AD_OrgTrx_ID < 1) set_Value (COLUMNNAME_AD_OrgTrx_ID, null); else set_Value (COLUMNNAME_AD_OrgTrx_ID, Integer.valueOf(AD_OrgTrx_ID)); } /** Get Trx Organization. @return Performing or initiating organization */ public int getAD_OrgTrx_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_AD_OrgTrx_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Entry Type. @param A_Entry_Type Entry Type */ public void setA_Entry_Type (String A_Entry_Type) { set_Value (COLUMNNAME_A_Entry_Type, A_Entry_Type); } /** Get Entry Type. @return Entry Type */ public String getA_Entry_Type () { return (String)get_Value(COLUMNNAME_A_Entry_Type); } /** Set Accounted Credit. @param AmtAcctCr Accounted Credit Amount */ public void setAmtAcctCr (BigDecimal AmtAcctCr) { set_Value (COLUMNNAME_AmtAcctCr, AmtAcctCr); } /** Get Accounted Credit. @return Accounted Credit Amount */ public BigDecimal getAmtAcctCr () { BigDecimal bd = (BigDecimal)get_Value(COLUMNNAME_AmtAcctCr); if (bd == null) return Env.ZERO; return bd; } /** Set Accounted Debit. @param AmtAcctDr Accounted Debit Amount */ public void setAmtAcctDr (BigDecimal AmtAcctDr) { set_Value (COLUMNNAME_AmtAcctDr, AmtAcctDr); } /** Get Accounted Debit. @return Accounted Debit Amount */ public BigDecimal getAmtAcctDr () { BigDecimal bd = (BigDecimal)get_Value(COLUMNNAME_AmtAcctDr); if (bd == null) return Env.ZERO; return bd; } /** Set Source Credit. @param AmtSourceCr Source Credit Amount */ public void setAmtSourceCr (BigDecimal AmtSourceCr) { set_Value (COLUMNNAME_AmtSourceCr, AmtSourceCr); } /** Get Source Credit. @return Source Credit Amount */ public BigDecimal getAmtSourceCr () { BigDecimal bd = (BigDecimal)get_Value(COLUMNNAME_AmtSourceCr); if (bd == null) return Env.ZERO; return bd; } /** Set Source Debit. @param AmtSourceDr Source Debit Amount */ public void setAmtSourceDr (BigDecimal AmtSourceDr) { set_Value (COLUMNNAME_AmtSourceDr, AmtSourceDr); } /** Get Source Debit. @return Source Debit Amount */ public BigDecimal getAmtSourceDr () { BigDecimal bd = (BigDecimal)get_Value(COLUMNNAME_AmtSourceDr); if (bd == null) return Env.ZERO; return bd; } /** Set Batch Description. 
@param BatchDescription Description of the Batch */ public void setBatchDescription (String BatchDescription) { set_Value (COLUMNNAME_BatchDescription, BatchDescription); } /** Get Batch Description. @return Description of the Batch */ public String getBatchDescription () { return (String)get_Value(COLUMNNAME_BatchDescription); } /** Set Batch Document No. @param BatchDocumentNo Document Number of the Batch */ public void setBatchDocumentNo (String BatchDocumentNo) { set_Value (COLUMNNAME_BatchDocumentNo, BatchDocumentNo); } /** Get Batch Document No. @return Document Number of the Batch */ public String getBatchDocumentNo () { return (String)get_Value(COLUMNNAME_BatchDocumentNo); } /** Set Business Partner Key. @param BPartnerValue Key of the Business Partner */ public void setBPartnerValue (String BPartnerValue) { set_Value (COLUMNNAME_BPartnerValue, BPartnerValue); } /** Get Business Partner Key. @return Key of the Business Partner */ public String getBPartnerValue () { return (String)get_Value(COLUMNNAME_BPartnerValue); } public I_C_AcctSchema getC_AcctSchema() throws RuntimeException { return (I_C_AcctSchema)MTable.get(getCtx(), I_C_AcctSchema.Table_Name) .getPO(getC_AcctSchema_ID(), get_TrxName()); } /** Set Accounting Schema. @param C_AcctSchema_ID Rules for accounting */ public void setC_AcctSchema_ID (int C_AcctSchema_ID) { if (C_AcctSchema_ID < 1) set_Value (COLUMNNAME_C_AcctSchema_ID, null); else set_Value (COLUMNNAME_C_AcctSchema_ID, Integer.valueOf(C_AcctSchema_ID)); } /** Get Accounting Schema. @return Rules for accounting */ public int getC_AcctSchema_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_AcctSchema_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_Activity getC_Activity() throws RuntimeException { return (I_C_Activity)MTable.get(getCtx(), I_C_Activity.Table_Name) .getPO(getC_Activity_ID(), get_TrxName()); } /** Set Activity. @param C_Activity_ID Business Activity */ public void setC_Activity_ID (int C_Activity_ID) { if (C_Activity_ID < 1) set_Value (COLUMNNAME_C_Activity_ID, null); else set_Value (COLUMNNAME_C_Activity_ID, Integer.valueOf(C_Activity_ID)); } /** Get Activity. @return Business Activity */ public int getC_Activity_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_Activity_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Category Name. @param CategoryName Name of the Category */ public void setCategoryName (String CategoryName) { set_Value (COLUMNNAME_CategoryName, CategoryName); } /** Get Category Name. @return Name of the Category */ public String getCategoryName () { return (String)get_Value(COLUMNNAME_CategoryName); } public I_C_BPartner getC_BPartner() throws RuntimeException { return (I_C_BPartner)MTable.get(getCtx(), I_C_BPartner.Table_Name) .getPO(getC_BPartner_ID(), get_TrxName()); } /** Set Business Partner . @param C_BPartner_ID Identifies a Business Partner */ public void setC_BPartner_ID (int C_BPartner_ID) { if (C_BPartner_ID < 1) set_Value (COLUMNNAME_C_BPartner_ID, null); else set_Value (COLUMNNAME_C_BPartner_ID, Integer.valueOf(C_BPartner_ID)); } /** Get Business Partner . @return Identifies a Business Partner */ public int getC_BPartner_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_BPartner_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_Campaign getC_Campaign() throws RuntimeException { return (I_C_Campaign)MTable.get(getCtx(), I_C_Campaign.Table_Name) .getPO(getC_Campaign_ID(), get_TrxName()); } /** Set Campaign. 
@param C_Campaign_ID Marketing Campaign */ public void setC_Campaign_ID (int C_Campaign_ID) { if (C_Campaign_ID < 1) set_Value (COLUMNNAME_C_Campaign_ID, null); else set_Value (COLUMNNAME_C_Campaign_ID, Integer.valueOf(C_Campaign_ID)); } /** Get Campaign. @return Marketing Campaign */ public int getC_Campaign_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_Campaign_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Currency Type. @param C_ConversionType_ID Currency Conversion Rate Type */ public void setC_ConversionType_ID (int C_ConversionType_ID) { if (C_ConversionType_ID < 1) set_Value (COLUMNNAME_C_ConversionType_ID, null); else set_Value (COLUMNNAME_C_ConversionType_ID, Integer.valueOf(C_ConversionType_ID)); } /** Get Currency Type. @return Currency Conversion Rate Type */ public int getC_ConversionType_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_ConversionType_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_Currency getC_Currency() throws RuntimeException { return (I_C_Currency)MTable.get(getCtx(), I_C_Currency.Table_Name) .getPO(getC_Currency_ID(), get_TrxName()); } /** Set Currency. @param C_Currency_ID The Currency for this record */ public void setC_Currency_ID (int C_Currency_ID) { if (C_Currency_ID < 1) set_Value (COLUMNNAME_C_Currency_ID, null); else set_Value (COLUMNNAME_C_Currency_ID, Integer.valueOf(C_Currency_ID)); } /** Get Currency. @return The Currency for this record */ public int getC_Currency_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_Currency_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_DocType getC_DocType() throws RuntimeException { return (I_C_DocType)MTable.get(getCtx(), I_C_DocType.Table_Name) .getPO(getC_DocType_ID(), get_TrxName()); } /** Set Document Type. @param C_DocType_ID Document type or rules */ public void setC_DocType_ID (int C_DocType_ID) { if (C_DocType_ID < 0) set_Value (COLUMNNAME_C_DocType_ID, null); else set_Value (COLUMNNAME_C_DocType_ID, Integer.valueOf(C_DocType_ID)); } /** Get Document Type. @return Document type or rules */ public int getC_DocType_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_DocType_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Client Key. @param ClientValue Key of the Client */ public void setClientValue (String ClientValue) { set_Value (COLUMNNAME_ClientValue, ClientValue); } /** Get Client Key. @return Key of the Client */ public String getClientValue () { return (String)get_Value(COLUMNNAME_ClientValue); } /** Set Location From. @param C_LocFrom_ID Location that inventory was moved from */ public void setC_LocFrom_ID (int C_LocFrom_ID) { if (C_LocFrom_ID < 1) set_Value (COLUMNNAME_C_LocFrom_ID, null); else set_Value (COLUMNNAME_C_LocFrom_ID, Integer.valueOf(C_LocFrom_ID)); } /** Get Location From. @return Location that inventory was moved from */ public int getC_LocFrom_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_LocFrom_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Location To. @param C_LocTo_ID Location that inventory was moved to */ public void setC_LocTo_ID (int C_LocTo_ID) { if (C_LocTo_ID < 1) set_Value (COLUMNNAME_C_LocTo_ID, null); else set_Value (COLUMNNAME_C_LocTo_ID, Integer.valueOf(C_LocTo_ID)); } /** Get Location To. @return Location that inventory was moved to */ public int getC_LocTo_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_LocTo_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Currency Type Key. 
@param ConversionTypeValue Key value for the Currency Conversion Rate Type */ public void setConversionTypeValue (String ConversionTypeValue) { set_Value (COLUMNNAME_ConversionTypeValue, ConversionTypeValue); } /** Get Currency Type Key. @return Key value for the Currency Conversion Rate Type */ public String getConversionTypeValue () { return (String)get_Value(COLUMNNAME_ConversionTypeValue); } public I_C_Period getC_Period() throws RuntimeException { return (I_C_Period)MTable.get(getCtx(), I_C_Period.Table_Name) .getPO(getC_Period_ID(), get_TrxName()); } /** Set Period. @param C_Period_ID Period of the Calendar */ public void setC_Period_ID (int C_Period_ID) { if (C_Period_ID < 1) set_Value (COLUMNNAME_C_Period_ID, null); else set_Value (COLUMNNAME_C_Period_ID, Integer.valueOf(C_Period_ID)); } /** Get Period. @return Period of the Calendar */ public int getC_Period_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_Period_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_Project getC_Project() throws RuntimeException { return (I_C_Project)MTable.get(getCtx(), I_C_Project.Table_Name) .getPO(getC_Project_ID(), get_TrxName()); } /** Set Project. @param C_Project_ID Financial Project */ public void setC_Project_ID (int C_Project_ID) { if (C_Project_ID < 1) set_Value (COLUMNNAME_C_Project_ID, null); else set_Value (COLUMNNAME_C_Project_ID, Integer.valueOf(C_Project_ID)); } /** Get Project. @return Financial Project */ public int getC_Project_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_Project_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_SalesRegion getC_SalesRegion() throws RuntimeException { return (I_C_SalesRegion)MTable.get(getCtx(), I_C_SalesRegion.Table_Name) .getPO(getC_SalesRegion_ID(), get_TrxName()); } /** Set Sales Region. @param C_SalesRegion_ID Sales coverage region */ public void setC_SalesRegion_ID (int C_SalesRegion_ID) { if (C_SalesRegion_ID < 1) set_Value (COLUMNNAME_C_SalesRegion_ID, null); else set_Value (COLUMNNAME_C_SalesRegion_ID, Integer.valueOf(C_SalesRegion_ID)); } /** Get Sales Region. @return Sales coverage region */ public int getC_SalesRegion_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_SalesRegion_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Sub Account. @param C_SubAcct_ID Sub account for Element Value */ public void setC_SubAcct_ID (int C_SubAcct_ID) { if (C_SubAcct_ID < 1) set_Value (COLUMNNAME_C_SubAcct_ID, null); else set_Value (COLUMNNAME_C_SubAcct_ID, Integer.valueOf(C_SubAcct_ID)); } /** Get Sub Account. @return Sub account for Element Value */ public int getC_SubAcct_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_SubAcct_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_UOM getC_UOM() throws RuntimeException { return (I_C_UOM)MTable.get(getCtx(), I_C_UOM.Table_Name) .getPO(getC_UOM_ID(), get_TrxName()); } /** Set UOM. @param C_UOM_ID Unit of Measure */ public void setC_UOM_ID (int C_UOM_ID) { if (C_UOM_ID < 1) set_Value (COLUMNNAME_C_UOM_ID, null); else set_Value (COLUMNNAME_C_UOM_ID, Integer.valueOf(C_UOM_ID)); } /** Get UOM. @return Unit of Measure */ public int getC_UOM_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_UOM_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Rate. @param CurrencyRate Currency Conversion Rate */ public void setCurrencyRate (BigDecimal CurrencyRate) { set_Value (COLUMNNAME_CurrencyRate, CurrencyRate); } /** Get Rate. 
@return Currency Conversion Rate */ public BigDecimal getCurrencyRate () { BigDecimal bd = (BigDecimal)get_Value(COLUMNNAME_CurrencyRate); if (bd == null) return Env.ZERO; return bd; } /** CurrencyRateType AD_Reference_ID=111 */ public static final int CURRENCYRATETYPE_AD_Reference_ID=111; /** Spot = S */ public static final String CURRENCYRATETYPE_Spot = "S"; /** Period End = P */ public static final String CURRENCYRATETYPE_PeriodEnd = "P"; /** None = N */ public static final String CURRENCYRATETYPE_None = "N"; /** Fixed = F */ public static final String CURRENCYRATETYPE_Fixed = "F"; /** Average = A */ public static final String CURRENCYRATETYPE_Average = "A"; /** Company = C */ public static final String CURRENCYRATETYPE_Company = "C"; /** User Type = U */ public static final String CURRENCYRATETYPE_UserType = "U"; /** Manual Rate = M */ public static final String CURRENCYRATETYPE_ManualRate = "M"; /** Set CurrencyRateType. @param CurrencyRateType CurrencyRateType */ public void setCurrencyRateType (String CurrencyRateType) { set_Value (COLUMNNAME_CurrencyRateType, CurrencyRateType); } /** Get CurrencyRateType. @return CurrencyRateType */ public String getCurrencyRateType () { return (String)get_Value(COLUMNNAME_CurrencyRateType); } public I_C_ValidCombination getC_ValidCombination() throws RuntimeException { return (I_C_ValidCombination)MTable.get(getCtx(), I_C_ValidCombination.Table_Name) .getPO(getC_ValidCombination_ID(), get_TrxName()); } /** Set Combination. @param C_ValidCombination_ID Valid Account Combination */ public void setC_ValidCombination_ID (int C_ValidCombination_ID) { if (C_ValidCombination_ID < 1) set_Value (COLUMNNAME_C_ValidCombination_ID, null); else set_Value (COLUMNNAME_C_ValidCombination_ID, Integer.valueOf(C_ValidCombination_ID)); } /** Get Combination. @return Valid Account Combination */ public int getC_ValidCombination_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_ValidCombination_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Account Date. @param DateAcct Accounting Date */ public void setDateAcct (Timestamp DateAcct) { set_Value (COLUMNNAME_DateAcct, DateAcct); } /** Get Account Date. @return Accounting Date */ public Timestamp getDateAcct () { return (Timestamp)get_Value(COLUMNNAME_DateAcct); } /** Set Description. @param Description Optional short description of the record */ public void setDescription (String Description) { set_Value (COLUMNNAME_Description, Description); } /** Get Description. @return Optional short description of the record */ public String getDescription () { return (String)get_Value(COLUMNNAME_Description); } /** Set Document Type Name. @param DocTypeName Name of the Document Type */ public void setDocTypeName (String DocTypeName) { set_Value (COLUMNNAME_DocTypeName, DocTypeName); } /** Get Document Type Name. @return Name of the Document Type */ public String getDocTypeName () { return (String)get_Value(COLUMNNAME_DocTypeName); } public I_GL_Budget getGL_Budget() throws RuntimeException { return (I_GL_Budget)MTable.get(getCtx(), I_GL_Budget.Table_Name) .getPO(getGL_Budget_ID(), get_TrxName()); } /** Set Budget. @param GL_Budget_ID General Ledger Budget */ public void setGL_Budget_ID (int GL_Budget_ID) { if (GL_Budget_ID < 1) set_Value (COLUMNNAME_GL_Budget_ID, null); else set_Value (COLUMNNAME_GL_Budget_ID, Integer.valueOf(GL_Budget_ID)); } /** Get Budget. 
@return General Ledger Budget */ public int getGL_Budget_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_GL_Budget_ID); if (ii == null) return 0; return ii.intValue(); } public I_GL_Category getGL_Category() throws RuntimeException { return (I_GL_Category)MTable.get(getCtx(), I_GL_Category.Table_Name) .getPO(getGL_Category_ID(), get_TrxName()); } /** Set GL Category. @param GL_Category_ID General Ledger Category */ public void setGL_Category_ID (int GL_Category_ID) { if (GL_Category_ID < 1) set_Value (COLUMNNAME_GL_Category_ID, null); else set_Value (COLUMNNAME_GL_Category_ID, Integer.valueOf(GL_Category_ID)); } /** Get GL Category. @return General Ledger Category */ public int getGL_Category_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_GL_Category_ID); if (ii == null) return 0; return ii.intValue(); } public I_GL_JournalBatch getGL_JournalBatch() throws RuntimeException { return (I_GL_JournalBatch)MTable.get(getCtx(), I_GL_JournalBatch.Table_Name) .getPO(getGL_JournalBatch_ID(), get_TrxName()); } /** Set Journal Batch. @param GL_JournalBatch_ID General Ledger Journal Batch */ public void setGL_JournalBatch_ID (int GL_JournalBatch_ID) { if (GL_JournalBatch_ID < 1) set_Value (COLUMNNAME_GL_JournalBatch_ID, null); else set_Value (COLUMNNAME_GL_JournalBatch_ID, Integer.valueOf(GL_JournalBatch_ID)); } /** Get Journal Batch. @return General Ledger Journal Batch */ public int getGL_JournalBatch_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_GL_JournalBatch_ID); if (ii == null) return 0; return ii.intValue(); } public I_GL_Journal getGL_Journal() throws RuntimeException { return (I_GL_Journal)MTable.get(getCtx(), I_GL_Journal.Table_Name) .getPO(getGL_Journal_ID(), get_TrxName()); } /** Set Journal. @param GL_Journal_ID General Ledger Journal */ public void setGL_Journal_ID (int GL_Journal_ID) { if (GL_Journal_ID < 1) set_Value (COLUMNNAME_GL_Journal_ID, null); else set_Value (COLUMNNAME_GL_Journal_ID, Integer.valueOf(GL_Journal_ID)); } /** Get Journal. @return General Ledger Journal */ public int getGL_Journal_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_GL_Journal_ID); if (ii == null) return 0; return ii.intValue(); } public I_GL_JournalLine getGL_JournalLine() throws RuntimeException { return (I_GL_JournalLine)MTable.get(getCtx(), I_GL_JournalLine.Table_Name) .getPO(getGL_JournalLine_ID(), get_TrxName()); } /** Set Journal Line. @param GL_JournalLine_ID General Ledger Journal Line */ public void setGL_JournalLine_ID (int GL_JournalLine_ID) { if (GL_JournalLine_ID < 1) set_Value (COLUMNNAME_GL_JournalLine_ID, null); else set_Value (COLUMNNAME_GL_JournalLine_ID, Integer.valueOf(GL_JournalLine_ID)); } /** Get Journal Line. @return General Ledger Journal Line */ public int getGL_JournalLine_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_GL_JournalLine_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Import Error Message. @param I_ErrorMsg Messages generated from import process */ public void setI_ErrorMsg (String I_ErrorMsg) { set_Value (COLUMNNAME_I_ErrorMsg, I_ErrorMsg); } /** Get Import Error Message. @return Messages generated from import process */ public String getI_ErrorMsg () { return (String)get_Value(COLUMNNAME_I_ErrorMsg); } /** Set I_FAJournal_ID. @param I_FAJournal_ID I_FAJournal_ID */ public void setI_FAJournal_ID (int I_FAJournal_ID) { if (I_FAJournal_ID < 1) set_ValueNoCheck (COLUMNNAME_I_FAJournal_ID, null); else set_ValueNoCheck (COLUMNNAME_I_FAJournal_ID, Integer.valueOf(I_FAJournal_ID)); } /** Get I_FAJournal_ID. 
@return I_FAJournal_ID */ public int getI_FAJournal_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_I_FAJournal_ID); if (ii == null) return 0; return ii.intValue(); } /** Get Record ID/ColumnName @return ID/ColumnName pair */ public KeyNamePair getKeyNamePair() { return new KeyNamePair(get_ID(), String.valueOf(getI_FAJournal_ID())); } /** Set Imported. @param I_IsImported Has this import been processed */ public void setI_IsImported (boolean I_IsImported) { set_Value (COLUMNNAME_I_IsImported, Boolean.valueOf(I_IsImported)); } /** Get Imported. @return Has this import been processed */ public boolean isI_IsImported () { Object oo = get_Value(COLUMNNAME_I_IsImported); if (oo != null) { if (oo instanceof Boolean) return ((Boolean)oo).booleanValue(); return "Y".equals(oo); } return false; } /** Set Depreciate. @param IsDepreciated The asset will be depreciated */ public void setIsDepreciated (String IsDepreciated) { set_Value (COLUMNNAME_IsDepreciated, IsDepreciated); } /** Get Depreciate. @return The asset will be depreciated */ public String getIsDepreciated () { return (String)get_Value(COLUMNNAME_IsDepreciated); } /** Set ISO Currency Code. @param ISO_Code Three letter ISO 4217 Code of the Currency */ public void setISO_Code (String ISO_Code) { set_Value (COLUMNNAME_ISO_Code, ISO_Code); } /** Get ISO Currency Code. @return Three letter ISO 4217 Code of the Currency */ public String getISO_Code () { return (String)get_Value(COLUMNNAME_ISO_Code); } /** Set Journal Document No. @param JournalDocumentNo Document number of the Journal */ public void setJournalDocumentNo (String JournalDocumentNo) { set_Value (COLUMNNAME_JournalDocumentNo, JournalDocumentNo); } /** Get Journal Document No. @return Document number of the Journal */ public String getJournalDocumentNo () { return (String)get_Value(COLUMNNAME_JournalDocumentNo); } /** Set Line No. @param Line Unique line for this document */ public void setLine (int Line) { set_Value (COLUMNNAME_Line, Integer.valueOf(Line)); } /** Get Line No. @return Unique line for this document */ public int getLine () { Integer ii = (Integer)get_Value(COLUMNNAME_Line); if (ii == null) return 0; return ii.intValue(); } public I_M_Product getM_Product() throws RuntimeException { return (I_M_Product)MTable.get(getCtx(), I_M_Product.Table_Name) .getPO(getM_Product_ID(), get_TrxName()); } /** Set Product. @param M_Product_ID Product, Service, Item */ public void setM_Product_ID (int M_Product_ID) { if (M_Product_ID < 1) set_Value (COLUMNNAME_M_Product_ID, null); else set_Value (COLUMNNAME_M_Product_ID, Integer.valueOf(M_Product_ID)); } /** Get Product. @return Product, Service, Item */ public int getM_Product_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_M_Product_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Trx Org Key. @param OrgTrxValue Key of the Transaction Organization */ public void setOrgTrxValue (String OrgTrxValue) { set_Value (COLUMNNAME_OrgTrxValue, OrgTrxValue); } /** Get Trx Org Key. @return Key of the Transaction Organization */ public String getOrgTrxValue () { return (String)get_Value(COLUMNNAME_OrgTrxValue); } /** Set Org Key. @param OrgValue Key of the Organization */ public void setOrgValue (String OrgValue) { set_Value (COLUMNNAME_OrgValue, OrgValue); } /** Get Org Key. 
@return Key of the Organization */ public String getOrgValue () { return (String)get_Value(COLUMNNAME_OrgValue); } /** PostingType AD_Reference_ID=125 */ public static final int POSTINGTYPE_AD_Reference_ID=125; /** Actual = A */ public static final String POSTINGTYPE_Actual = "A"; /** Budget = B */ public static final String POSTINGTYPE_Budget = "B"; /** Commitment = E */ public static final String POSTINGTYPE_Commitment = "E"; /** Statistical = S */ public static final String POSTINGTYPE_Statistical = "S"; /** Reservation = R */ public static final String POSTINGTYPE_Reservation = "R"; /** Set PostingType. @param PostingType The type of posted amount for the transaction */ public void setPostingType (String PostingType) { set_Value (COLUMNNAME_PostingType, PostingType); } /** Get PostingType. @return The type of posted amount for the transaction */ public String getPostingType () { return (String)get_Value(COLUMNNAME_PostingType); } /** Set Processed. @param Processed The document has been processed */ public void setProcessed (boolean Processed) { set_Value (COLUMNNAME_Processed, Boolean.valueOf(Processed)); } /** Get Processed. @return The document has been processed */ public boolean isProcessed () { Object oo = get_Value(COLUMNNAME_Processed); if (oo != null) { if (oo instanceof Boolean) return ((Boolean)oo).booleanValue(); return "Y".equals(oo); } return false; } /** Set Process Now. @param Processing Process Now */ public void setProcessing (boolean Processing) { set_Value (COLUMNNAME_Processing, Boolean.valueOf(Processing)); } /** Get Process Now. @return Process Now */ public boolean isProcessing () { Object oo = get_Value(COLUMNNAME_Processing); if (oo != null) { if (oo instanceof Boolean) return ((Boolean)oo).booleanValue(); return "Y".equals(oo); } return false; } /** Set Product Key. @param ProductValue Key of the Product */ public void setProductValue (String ProductValue) { set_Value (COLUMNNAME_ProductValue, ProductValue); } /** Get Product Key. @return Key of the Product */ public String getProductValue () { return (String)get_Value(COLUMNNAME_ProductValue); } /** Set Project Key. @param ProjectValue Key of the Project */ public void setProjectValue (String ProjectValue) { set_Value (COLUMNNAME_ProjectValue, ProjectValue); } /** Get Project Key. @return Key of the Project */ public String getProjectValue () { return (String)get_Value(COLUMNNAME_ProjectValue); } /** Set Quantity. @param Qty Quantity */ public void setQty (BigDecimal Qty) { set_Value (COLUMNNAME_Qty, Qty); } /** Get Quantity. @return Quantity */ public BigDecimal getQty () { BigDecimal bd = (BigDecimal)get_Value(COLUMNNAME_Qty); if (bd == null) return Env.ZERO; return bd; } /** Set SKU. @param SKU Stock Keeping Unit */ public void setSKU (String SKU) { set_Value (COLUMNNAME_SKU, SKU); } /** Get SKU. @return Stock Keeping Unit */ public String getSKU () { return (String)get_Value(COLUMNNAME_SKU); } /** Set UPC/EAN. @param UPC Bar Code (Universal Product Code or its superset European Article Number) */ public void setUPC (String UPC) { set_Value (COLUMNNAME_UPC, UPC); } /** Get UPC/EAN. @return Bar Code (Universal Product Code or its superset European Article Number) */ public String getUPC () { return (String)get_Value(COLUMNNAME_UPC); } public I_C_ElementValue getUser1() throws RuntimeException { return (I_C_ElementValue)MTable.get(getCtx(), I_C_ElementValue.Table_Name) .getPO(getUser1_ID(), get_TrxName()); } /** Set User List 1. 
@param User1_ID User defined list element #1 */ public void setUser1_ID (int User1_ID) { if (User1_ID < 1) set_Value (COLUMNNAME_User1_ID, null); else set_Value (COLUMNNAME_User1_ID, Integer.valueOf(User1_ID)); } /** Get User List 1. @return User defined list element #1 */ public int getUser1_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_User1_ID); if (ii == null) return 0; return ii.intValue(); } public I_C_ElementValue getUser2() throws RuntimeException { return (I_C_ElementValue)MTable.get(getCtx(), I_C_ElementValue.Table_Name) .getPO(getUser2_ID(), get_TrxName()); } /** Set User List 2. @param User2_ID User defined list element #2 */ public void setUser2_ID (int User2_ID) { if (User2_ID < 1) set_Value (COLUMNNAME_User2_ID, null); else set_Value (COLUMNNAME_User2_ID, Integer.valueOf(User2_ID)); } /** Get User List 2. @return User defined list element #2 */ public int getUser2_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_User2_ID); if (ii == null) return 0; return ii.intValue(); } /** Set User Element 1. @param UserElement1_ID User defined accounting Element */ public void setUserElement1_ID (int UserElement1_ID) { if (UserElement1_ID < 1) set_Value (COLUMNNAME_UserElement1_ID, null); else set_Value (COLUMNNAME_UserElement1_ID, Integer.valueOf(UserElement1_ID)); } /** Get User Element 1. @return User defined accounting Element */ public int getUserElement1_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_UserElement1_ID); if (ii == null) return 0; return ii.intValue(); } /** Set User Element 2. @param UserElement2_ID User defined accounting Element */ public void setUserElement2_ID (int UserElement2_ID) { if (UserElement2_ID < 1) set_Value (COLUMNNAME_UserElement2_ID, null); else set_Value (COLUMNNAME_UserElement2_ID, Integer.valueOf(UserElement2_ID)); } /** Get User Element 2. @return User defined accounting Element */ public int getUserElement2_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_UserElement2_ID); if (ii == null) return 0; return ii.intValue(); } }
14,342
321
<gh_stars>100-1000 #include "curl_http_form.h" #include "curl/curl.h" //#include <zlib.h> namespace network { CurlHttpForm::CurlHttpForm() : formpost_(nullptr), lastptr_(nullptr) { } CurlHttpForm::~CurlHttpForm() { if (formpost_) { curl_formfree(formpost_); } } void CurlHttpForm::AddFileSection(const std::string &name, const std::string &path, const std::string& mime) { if (mime.empty()) { curl_formadd( &formpost_, &lastptr_, CURLFORM_COPYNAME, name.c_str(), CURLFORM_FILE, path.c_str(), CURLFORM_FILENAME, "log.gz", CURLFORM_END); } else { curl_formadd( &formpost_, &lastptr_, CURLFORM_COPYNAME, name.c_str(), CURLFORM_FILE, path.c_str(), CURLFORM_CONTENTTYPE, mime.c_str(), CURLFORM_END); } } void CurlHttpForm::AddStringSection(const std::string &field, const std::string &value) { curl_formadd( &formpost_, &lastptr_, CURLFORM_COPYNAME, field.c_str(), CURLFORM_COPYCONTENTS, value.c_str(), CURLFORM_END); } struct curl_httppost* CurlHttpForm::GetForm() const { return formpost_; } } // end of namespace network
523
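CurlHttpForm above is a thin wrapper over libcurl's curl_formadd(): string and file sections accumulate in formpost_ and GetForm() exposes the result for posting. A short sketch of attaching such a form to an easy handle; the URL, field names, and MIME type are illustrative.

#include <curl/curl.h>
#include "curl_http_form.h"   // header included by the source above

void sketch_upload_form() {
    network::CurlHttpForm form;
    form.AddStringSection("device_id", "abc123");                        // plain key/value part
    form.AddFileSection("logfile", "/tmp/log.gz", "application/gzip");   // file part with explicit MIME type

    CURL* curl = curl_easy_init();
    if (curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");
        curl_easy_setopt(curl, CURLOPT_HTTPPOST, form.GetForm());        // hand the built form to libcurl
        CURLcode res = curl_easy_perform(curl);
        (void)res;
        curl_easy_cleanup(curl);
    }
}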
2,542
// ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once namespace Management { namespace FileStoreService { class PendingWriteOperations { DENY_COPY(PendingWriteOperations); public: explicit PendingWriteOperations() : closed_(false), set_(), setLock_() { } ~PendingWriteOperations() { } Common::ErrorCode TryAdd(std::wstring const & key, bool const isFolder); Common::ErrorCode Contains(std::wstring const & key, __out bool & contains); bool Remove(std::wstring const & key); void Clear(); size_t Count(); // closes the set, so that no more additions are allowed std::set<std::wstring, Common::IsLessCaseInsensitiveComparer<std::wstring>> Close(); private: // TODO: A tree implementation would // make TryAdd more efficient std::set<std::wstring, Common::IsLessCaseInsensitiveComparer<std::wstring>> set_; Common::RwLock setLock_; bool closed_; }; } }
588
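PendingWriteOperations above guards a case-insensitive set of paths with a reader-writer lock so that only one write per key is in flight. A rough usage sketch; it assumes Service Fabric's Common::ErrorCode and the surrounding headers are available, so treat it as illustrative only.

using Management::FileStoreService::PendingWriteOperations;

void SketchPendingWrites() {
    PendingWriteOperations pending;

    auto error = pending.TryAdd(L"Store\\Data\\file.txt", false /* isFolder */);
    bool inFlight = false;
    pending.Contains(L"store\\data\\FILE.TXT", inFlight);   // inFlight becomes true: the comparer is case-insensitive

    pending.Remove(L"Store\\Data\\file.txt");                // the write completed
    auto remaining = pending.Close();                        // snapshot; no further additions accepted
    (void)error; (void)inFlight; (void)remaining;
}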
518
{ "name": "Mandrill", "category": "Marketing & Analytics", "start_url": "https://mandrillapp.com/login/", "icons": [ { "src": "https://cdn.filestackcontent.com/8K0tiOfDRCyMSbudXkUk", "platform": "browserx" } ], "theme_color": "#C02439", "scope": "https://mandrillapp.com", "bx_legacy_service_id": "mandrill" }
154
2,291
<filename>issues/osmdroid_issue328.json { "id" : 328, "status" : "Fixed", "summary" : "Make MapView \"all arguments\" constructor public or protected", "labels" : [ "Type-Enhancement", "Priority-Medium" ], "stars" : 0, "commentCount" : 4, "comments" : [ { "id" : 0, "commenterId" : -8473345269847656908, "content" : "<b>What steps will reproduce the problem?</b>\n1. Place a MapView in an XML resource. The MapView will be constructed using the standard XML two argument constructor which in turn calls the full constructor with defaults filled in. There's no way to override these defaults in a subclass because that constructor is private.\r\n\r\n<b>What is the expected output? What do you see instead?</b>\n\r\n\r\n<b>What version of the product are you using? On what operating system?</b>\n\r\n\r\n<b>Please provide any additional information below.</b>\n\r\n", "timestamp" : 1333465120, "attachments" : [ ] }, { "id" : 1, "commenterId" : 8937367184059112911, "content" : "If you use the Mapview in a resource then it's always going to use the two argument constructor.\r\n\r\nIf you want options then you want them made available via the attributes.\r\n\r\nWhat attributes do you want added?", "timestamp" : 1333516960, "attachments" : [ ] }, { "id" : 2, "commenterId" : -8473345269847656908, "content" : "Hi thanks for the feedback. I'd like to subclass MapView and be able to call \r\n\r\nprivate MapView(final Context context, final int tileSizePixels,\r\n final ResourceProxy resourceProxy, MapTileProviderBase tileProvider,\r\n final Handler tileRequestCompleteHandler, final AttributeSet attrs) ;\r\n\r\nfrom my subclass's constructor. The reason is that the subclass can then use my custom tileprovider/resourceproxy while getting instantiated from a layout XML. I don't see any other way to achieve that currently.", "timestamp" : 1333525908, "attachments" : [ ] }, { "id" : 3, "commenterId" : 8937367184059112911, "content" : "This issue was closed by revision r1085.", "timestamp" : 1333600507, "attachments" : [ ] } ] }
769
436
/* * * Copyright (C) 2020 iQIYI (www.iqiyi.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package org.qiyi.basecore.taskmanager; import org.qiyi.basecore.taskmanager.other.TMLog; import org.qiyi.basecore.taskmanager.pool.ObjectPool; /** * Wrapper used to run parallel tasks */ class ParallelTaskWrapper extends TaskWrapper { private volatile int taskIndex; private ParallelRequest mTaskRequest; private final String TAG = "TM_ParallelTaskWrapper"; private boolean takenRun; public ParallelTaskWrapper(ParallelRequest taskRequest, int index) { super(null); taskIndex = index; mTaskRequest = taskRequest; } @Override protected void runTask() { int taskId; do { taskId = taskIndex; // In parallel tasks, override this and return the task at the index Task task = mTaskRequest.getTaskAt(taskIndex); int stateCheck = task.compareAndSetState(Task.STATE_RUNNING); if (TM.isFullLogEnabled()) { TMLog.d("TaskManager", task.getName() + " in wrapper " + stateCheck + " " + takenRun + " " + mTaskRequest); } // this is to make sure the taken ones can run if (stateCheck < 0 || (takenRun && stateCheck == Task.STATE_RUNNING)) { takenRun = false; task.setWrapper(this); task.doBeforeTask(); task.doTask(); task.doAfterTask(); mTaskRequest.onTaskStateChange(taskId, Task.STATE_FINISHED); } else { mTaskRequest.requestNextIdle(this); TMLog.d(TAG, "running state was changed , before run : task might be executed more than once"); } } while (taskId != taskIndex); //taskIndex might be changed during call : mTaskRequest.onTaskStateChange(taskIndex, Task.STATE_FINISHED) } public void changeTask(int taskId) { taskIndex = taskId; takenRun = true; if (TM.isFullLogEnabled()) { TMLog.d(TAG, " launchD task>>> index is changed " + taskId + this); } } @Override protected void onRequestChange() { taskIndex = 0; } @Override public String toString() { if (mTaskRequest != null) { Task task = mTaskRequest.getTaskAt(taskIndex); if (task != null) { return task.getName() + " " + task.getTaskId() + super.toString(); } } return super.toString(); } public void set(ParallelRequest taskRequest, int index) { super.set(null); taskIndex = index; mTaskRequest = taskRequest; } public static ParallelTaskWrapper obtain(ParallelRequest taskRequest, int index) { ParallelTaskWrapper wrapper = ObjectPool.obtain(ParallelTaskWrapper.class); if (wrapper == null) { return new ParallelTaskWrapper(taskRequest, index); } wrapper.set(taskRequest, index); return wrapper; } @Override public void recycle() { super.recycle(); mTaskRequest = null; taskIndex = 0; takenRun = false; } }
1,529
354
<reponame>VetalDev/VK-GL-CTS /* WARNING: This is auto-generated file. Do not modify, since changes will * be lost! Modify the generating script instead. */ void getCoreDeviceExtensionsImpl (uint32_t coreVersion, ::std::vector<const char*>& dst) { if (coreVersion >= VK_API_VERSION_1_2) { dst.push_back("VK_KHR_sampler_mirror_clamp_to_edge"); dst.push_back("VK_KHR_shader_float16_int8"); dst.push_back("VK_KHR_imageless_framebuffer"); dst.push_back("VK_KHR_create_renderpass2"); dst.push_back("VK_KHR_image_format_list"); dst.push_back("VK_KHR_draw_indirect_count"); dst.push_back("VK_KHR_shader_subgroup_extended_types"); dst.push_back("VK_KHR_8bit_storage"); dst.push_back("VK_KHR_shader_atomic_int64"); dst.push_back("VK_KHR_driver_properties"); dst.push_back("VK_KHR_shader_float_controls"); dst.push_back("VK_KHR_depth_stencil_resolve"); dst.push_back("VK_KHR_timeline_semaphore"); dst.push_back("VK_KHR_vulkan_memory_model"); dst.push_back("VK_KHR_spirv_1_4"); dst.push_back("VK_KHR_separate_depth_stencil_layouts"); dst.push_back("VK_KHR_uniform_buffer_standard_layout"); dst.push_back("VK_KHR_buffer_device_address"); dst.push_back("VK_EXT_sampler_filter_minmax"); dst.push_back("VK_EXT_descriptor_indexing"); dst.push_back("VK_EXT_shader_viewport_index_layer"); dst.push_back("VK_EXT_scalar_block_layout"); dst.push_back("VK_EXT_separate_stencil_usage"); dst.push_back("VK_EXT_host_query_reset"); } if (coreVersion >= VK_API_VERSION_1_1) { dst.push_back("VK_KHR_multiview"); dst.push_back("VK_KHR_device_group"); dst.push_back("VK_KHR_shader_draw_parameters"); dst.push_back("VK_KHR_maintenance1"); dst.push_back("VK_KHR_external_memory"); dst.push_back("VK_KHR_external_semaphore"); dst.push_back("VK_KHR_16bit_storage"); dst.push_back("VK_KHR_descriptor_update_template"); dst.push_back("VK_KHR_external_fence"); dst.push_back("VK_KHR_maintenance2"); dst.push_back("VK_KHR_variable_pointers"); dst.push_back("VK_KHR_dedicated_allocation"); dst.push_back("VK_KHR_storage_buffer_storage_class"); dst.push_back("VK_KHR_relaxed_block_layout"); dst.push_back("VK_KHR_get_memory_requirements2"); dst.push_back("VK_KHR_sampler_ycbcr_conversion"); dst.push_back("VK_KHR_bind_memory2"); dst.push_back("VK_KHR_maintenance3"); } } void getCoreInstanceExtensionsImpl (uint32_t coreVersion, ::std::vector<const char*>& dst) { if (coreVersion >= VK_API_VERSION_1_1) { dst.push_back("VK_KHR_get_physical_device_properties2"); dst.push_back("VK_KHR_device_group_creation"); dst.push_back("VK_KHR_external_memory_capabilities"); dst.push_back("VK_KHR_external_semaphore_capabilities"); dst.push_back("VK_KHR_external_fence_capabilities"); } }
//
//  AppController.h
//  Web2PDF Server
//
//  Created by Jürgen on 13.09.06.
//  Copyright 2006 Cultured Code.
//  License: Creative Commons Attribution 2.5 License
//           http://creativecommons.org/licenses/by/2.5/
//

#import <Cocoa/Cocoa.h>

@class GCDWebServer;

@interface WebServerController : NSObject {
}

@property(strong) GCDWebServer * httpServer;
@property(assign) NSUInteger webServerPort;

- (void)startProcessing;
- (void)stopProcessing;

@end
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.pulsar.proxy.server; import static org.apache.commons.lang3.StringUtils.isBlank; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.nio.ByteBuffer; import java.security.cert.X509Certificate; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.Objects; import java.util.Set; import java.util.concurrent.Executor; import javax.net.ssl.SSLContext; import javax.servlet.ServletConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.pulsar.broker.web.AuthenticationFilter; import org.apache.pulsar.client.api.Authentication; import org.apache.pulsar.client.api.AuthenticationDataProvider; import org.apache.pulsar.client.api.AuthenticationFactory; import org.apache.pulsar.client.api.PulsarClientException; import org.apache.pulsar.common.util.SecurityUtility; import org.apache.pulsar.policies.data.loadbalancer.ServiceLookupData; import org.eclipse.jetty.client.HttpClient; import org.eclipse.jetty.client.HttpRequest; import org.eclipse.jetty.client.ProtocolHandlers; import org.eclipse.jetty.client.RedirectProtocolHandler; import org.eclipse.jetty.client.api.ContentProvider; import org.eclipse.jetty.client.api.Request; import org.eclipse.jetty.client.http.HttpClientTransportOverHTTP; import org.eclipse.jetty.http.HttpHeader; import org.eclipse.jetty.proxy.ProxyServlet; import org.eclipse.jetty.util.HttpCookieStore; import org.eclipse.jetty.util.ssl.SslContextFactory; import org.eclipse.jetty.util.thread.QueuedThreadPool; import org.slf4j.Logger; import org.slf4j.LoggerFactory; class AdminProxyHandler extends ProxyServlet { private static final Logger LOG = LoggerFactory.getLogger(AdminProxyHandler.class); private static final String ORIGINAL_PRINCIPAL_HEADER = "X-Original-Principal"; private static final Set<String> functionRoutes = new HashSet<>(Arrays.asList( "/admin/v3/function", "/admin/v2/function", "/admin/function", "/admin/v3/source", "/admin/v2/source", "/admin/source", "/admin/v3/sink", "/admin/v2/sink", "/admin/sink", "/admin/v2/worker", "/admin/v2/worker-stats", "/admin/worker", "/admin/worker-stats" )); private final ProxyConfiguration config; private final BrokerDiscoveryProvider discoveryProvider; private final String brokerWebServiceUrl; private final String functionWorkerWebServiceUrl; AdminProxyHandler(ProxyConfiguration config, BrokerDiscoveryProvider discoveryProvider) { this.config = config; this.discoveryProvider = discoveryProvider; this.brokerWebServiceUrl = config.isTlsEnabledWithBroker() ? 
config.getBrokerWebServiceURLTLS() : config.getBrokerWebServiceURL(); this.functionWorkerWebServiceUrl = config.isTlsEnabledWithBroker() ? config.getFunctionWorkerWebServiceURLTLS() : config.getFunctionWorkerWebServiceURL(); super.setTimeout(config.getHttpProxyTimeout()); } @Override protected HttpClient createHttpClient() throws ServletException { ServletConfig config = getServletConfig(); HttpClient client = newHttpClient(); client.setFollowRedirects(true); // Must not store cookies, otherwise cookies of different clients will mix. client.setCookieStore(new HttpCookieStore.Empty()); Executor executor; String value = config.getInitParameter("maxThreads"); if (value == null || "-".equals(value)) { executor = (Executor) getServletContext().getAttribute("org.eclipse.jetty.server.Executor"); if (executor == null) { throw new IllegalStateException("No server executor for proxy"); } } else { QueuedThreadPool qtp = new QueuedThreadPool(Integer.parseInt(value)); String servletName = config.getServletName(); int dot = servletName.lastIndexOf('.'); if (dot >= 0) { servletName = servletName.substring(dot + 1); } qtp.setName(servletName); executor = qtp; } client.setExecutor(executor); value = config.getInitParameter("maxConnections"); if (value == null) { value = "256"; } client.setMaxConnectionsPerDestination(Integer.parseInt(value)); value = config.getInitParameter("idleTimeout"); if (value == null) { value = "30000"; } client.setIdleTimeout(Long.parseLong(value)); value = config.getInitParameter("requestBufferSize"); if (value != null) { client.setRequestBufferSize(Integer.parseInt(value)); } value = config.getInitParameter("responseBufferSize"); if (value != null){ client.setResponseBufferSize(Integer.parseInt(value)); } try { client.start(); // Content must not be decoded, otherwise the client gets confused. client.getContentDecoderFactories().clear(); // Pass traffic to the client, only intercept what's necessary. 
ProtocolHandlers protocolHandlers = client.getProtocolHandlers(); protocolHandlers.clear(); protocolHandlers.put(new RedirectProtocolHandler(client)); return client; } catch (Exception x) { throw new ServletException(x); } } // This class allows the request body to be replayed, the default implementation // does not protected class ReplayableProxyContentProvider extends ProxyInputStreamContentProvider { static final int MIN_REPLAY_BODY_BUFFER_SIZE = 64; private boolean bodyBufferAvailable = false; private boolean bodyBufferMaxSizeReached = false; private final ByteArrayOutputStream bodyBuffer; private final long httpInputMaxReplayBufferSize; protected ReplayableProxyContentProvider(HttpServletRequest request, HttpServletResponse response, Request proxyRequest, InputStream input, int httpInputMaxReplayBufferSize) { super(request, response, proxyRequest, input); bodyBuffer = new ByteArrayOutputStream( Math.min(Math.max(request.getContentLength(), MIN_REPLAY_BODY_BUFFER_SIZE), httpInputMaxReplayBufferSize)); this.httpInputMaxReplayBufferSize = httpInputMaxReplayBufferSize; } @Override public Iterator<ByteBuffer> iterator() { if (bodyBufferAvailable) { return Collections.singleton(ByteBuffer.wrap(bodyBuffer.toByteArray())).iterator(); } else { bodyBufferAvailable = true; return super.iterator(); } } @Override protected ByteBuffer onRead(byte[] buffer, int offset, int length) { if (!bodyBufferMaxSizeReached) { if (bodyBuffer.size() + length < httpInputMaxReplayBufferSize) { bodyBuffer.write(buffer, offset, length); } else { bodyBufferMaxSizeReached = true; bodyBufferAvailable = false; bodyBuffer.reset(); } } return super.onRead(buffer, offset, length); } } private static class JettyHttpClient extends HttpClient { private static final int NUMBER_OF_SELECTOR_THREADS = 1; public JettyHttpClient() { super(new HttpClientTransportOverHTTP(NUMBER_OF_SELECTOR_THREADS), null); } public JettyHttpClient(SslContextFactory sslContextFactory) { super(new HttpClientTransportOverHTTP(NUMBER_OF_SELECTOR_THREADS), sslContextFactory); } /** * Ensure the Authorization header is carried over after a 307 redirect * from brokers. 
*/ @Override protected Request copyRequest(HttpRequest oldRequest, URI newURI) { String authorization = oldRequest.getHeaders().get(HttpHeader.AUTHORIZATION); Request newRequest = super.copyRequest(oldRequest, newURI); if (authorization != null) { newRequest.header(HttpHeader.AUTHORIZATION, authorization); } return newRequest; } } @Override protected ContentProvider proxyRequestContent(HttpServletRequest request, HttpServletResponse response, Request proxyRequest) throws IOException { return new ReplayableProxyContentProvider(request, response, proxyRequest, request.getInputStream(), config.getHttpInputMaxReplayBufferSize()); } @Override protected HttpClient newHttpClient() { try { Authentication auth = AuthenticationFactory.create( config.getBrokerClientAuthenticationPlugin(), config.getBrokerClientAuthenticationParameters() ); Objects.requireNonNull(auth, "No supported auth found for proxy"); auth.start(); if (config.isTlsEnabledWithBroker()) { try { X509Certificate[] trustCertificates = SecurityUtility .loadCertificatesFromPemFile(config.getBrokerClientTrustCertsFilePath()); SSLContext sslCtx; AuthenticationDataProvider authData = auth.getAuthData(); if (authData.hasDataForTls()) { sslCtx = SecurityUtility.createSslContext( config.isTlsAllowInsecureConnection(), trustCertificates, authData.getTlsCertificates(), authData.getTlsPrivateKey() ); } else { sslCtx = SecurityUtility.createSslContext( config.isTlsAllowInsecureConnection(), trustCertificates ); } SslContextFactory contextFactory = new SslContextFactory.Client(true); contextFactory.setSslContext(sslCtx); return new JettyHttpClient(contextFactory); } catch (Exception e) { LOG.error("new jetty http client exception ", e); try { auth.close(); } catch (IOException ioe) { LOG.error("Failed to close the authentication service", ioe); } throw new PulsarClientException.InvalidConfigurationException(e.getMessage()); } } } catch (PulsarClientException e) { throw new RuntimeException(e); } // return an unauthenticated client, every request will fail. 
return new JettyHttpClient(); } @Override protected String rewriteTarget(HttpServletRequest request) { StringBuilder url = new StringBuilder(); boolean isFunctionsRestRequest = false; String requestUri = request.getRequestURI(); for (String routePrefix : functionRoutes) { if (requestUri.startsWith(routePrefix)) { isFunctionsRestRequest = true; break; } } if (isFunctionsRestRequest && !isBlank(functionWorkerWebServiceUrl)) { url.append(functionWorkerWebServiceUrl); } else if (isBlank(brokerWebServiceUrl)) { try { ServiceLookupData availableBroker = discoveryProvider.nextBroker(); if (config.isTlsEnabledWithBroker()) { url.append(availableBroker.getWebServiceUrlTls()); } else { url.append(availableBroker.getWebServiceUrl()); } if (LOG.isDebugEnabled()) { LOG.debug("[{}:{}] Selected active broker is {}", request.getRemoteAddr(), request.getRemotePort(), url); } } catch (Exception e) { LOG.warn("[{}:{}] Failed to get next active broker {}", request.getRemoteAddr(), request.getRemotePort(), e.getMessage(), e); return null; } } else { url.append(brokerWebServiceUrl); } if (url.lastIndexOf("/") == url.length() - 1) { url.deleteCharAt(url.lastIndexOf("/")); } url.append(requestUri); String query = request.getQueryString(); if (query != null) { url.append("?").append(query); } URI rewrittenUrl = URI.create(url.toString()).normalize(); if (!validateDestination(rewrittenUrl.getHost(), rewrittenUrl.getPort())) { return null; } return rewrittenUrl.toString(); } @Override protected void addProxyHeaders(HttpServletRequest clientRequest, Request proxyRequest) { super.addProxyHeaders(clientRequest, proxyRequest); String user = (String) clientRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName); if (user != null) { proxyRequest.header(ORIGINAL_PRINCIPAL_HEADER, user); } } }
<gh_stars>1000+ # # Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ # Written by <NAME> <<EMAIL>>, # <NAME> <<EMAIL>> # import os from os import getenv import unittest import numpy as np import torch from torch.nn.init import normal_ from fast_transformers.aggregate import aggregate, broadcast from fast_transformers.hashing import compute_hashes from fast_transformers.clustering.hamming import cluster from fast_transformers.sparse_product import clustered_sparse_weighted_average from fast_transformers.sparse_product import clustered_sparse_dot_product from fast_transformers.masking import LengthMask def cluster_queries(Q, query_lengths, C, I, B): N, H, L, E = Q.shape planes = Q.new_empty((B, E+1)) normal_(planes) planes[:, -1] = 0 hashes = compute_hashes(Q.view(N*H*L, E), planes).view(N, H, L) # Cluster the hashes and return the cluster index per query groups, counts = cluster( hashes, query_lengths, clusters=C, iterations=I, bits=B ) return groups, counts class TestSparseWeightedAverage(unittest.TestCase): @classmethod def setUpClass(cls): if not torch.cuda.is_available(): raise unittest.SkipTest("No CUDA capable device detected") @property def device(self): return "cuda" def _zero_grad(self, Q, K): for x in [Q, K]: if x.grad is not None: x.grad[...] = 0 def test_correctness_masked(self): N = 12 H = 6 L = 1000 S = 1000 E = 32 k = 32 C = 100 I = 10 B = 32 for exp in range(30): N = np.random.randint(1, 6) H = np.random.randint(1, 8) C = np.random.randint(10, 500) L = np.random.randint(C, 2000) E = np.random.randint(10, 128) S = np.random.randint(100, 1000) k = np.random.randint(10, 64) if os.getenv("VERBOSE_TESTS", ""): print(("Testing Masked: N H L S E C k: " "{} {} {} {} {} {} {}").format(N, H, L, S, E, C, k)) Q = torch.randn(N, H, L, E).to(self.device) K = torch.randn(N, H, S, E).to(self.device) lengths = np.random.randint(C, L+1, N) lengths = torch.tensor(lengths, dtype=torch.int32).to(self.device) lengths[0] = L query_lengths = LengthMask( lengths, max_len=L ) groups, counts = cluster_queries(Q, lengths, C, I, B) sorted_g, sorted_gi = torch.sort(groups.view(N*H, -1), dim=-1) sorted_rev_gi = torch.argsort(sorted_gi, dim=-1) q_offset = torch.arange(N*H, device=Q.device).unsqueeze(-1) * L q_flat = (sorted_gi + q_offset).reshape(-1) s_queries = Q.reshape(-1, E).index_select( 0, q_flat).view(N, H, L, E) Q_grouped = aggregate( s_queries, sorted_g.view(N, H, L), 1/counts.float() ) QK = torch.einsum("nhle,nhse->nhls", Q_grouped, K) _, topk = torch.topk(QK, k, dim=-1) topk = topk.contiguous() topk_broadcast = broadcast( topk.float(), groups, torch.ones_like(counts, dtype=torch.float32), torch.zeros((N, H, L, k), device=Q.device) ) weights_sorted = torch.rand( N, H, L, k).to(self.device).requires_grad_(True) weights_sorted.retain_grad() q_rev_flat = (sorted_rev_gi + q_offset).reshape(-1) weights = torch.clone( weights_sorted.reshape(-1, k).index_select( 0, q_rev_flat ).view(N, H, L, k) ) weights.retain_grad() values = torch.randn( N, H, S, E).to(self.device).requires_grad_(True) self._zero_grad(weights, values) values_selected = values[ torch.arange(N).view(N, 1, 1, 1).to(self.device), torch.arange(H).view(1, H, 1, 1).to(self.device), topk_broadcast.long() ] output = (weights.unsqueeze(-1)*values_selected).sum(-2) output = output * query_lengths.float_matrix[:, None, :, None] output.sum().backward() grad = [torch.clone(weights.grad), torch.clone(values.grad)] self._zero_grad(weights_sorted, values) self._zero_grad(weights, values) output_hat_sorted = 
clustered_sparse_weighted_average( weights_sorted, values, topk, groups, counts ) output_hat = output_hat_sorted.reshape( -1, E).index_select(0, q_rev_flat).view(N, H, L, E) self.assertLess( torch.abs(output - output_hat).max(), 1e-4 ) output_hat.sum().backward() weights_grad_sorted = torch.clone(weights_sorted.grad) weights_grad = torch.clone( weights_grad_sorted.reshape(-1, k).index_select( 0, q_rev_flat).view(N, H, L, k) ) grad_hat = [weights_grad, torch.clone(values.grad)] for g1, g2 in zip(grad, grad_hat): self.assertLess( torch.abs(g1 - g2).max(), 1e-3 ) def test_correctness(self): N = 12 H = 6 L = 1000 S = 1000 E = 32 k = 32 C = 100 I = 10 B = 32 for exp in range(30): N = np.random.randint(1, 3) H = np.random.randint(1, 4) C = np.random.randint(10, 500) L = np.random.randint(C, 2000) E = np.random.randint(10, 128) S = np.random.randint(100, 1000) k = np.random.randint(10, 64) if os.getenv("VERBOSE_TESTS", ""): print(("Testing: N H L S E C k: " "{} {} {} {} {} {} {}").format(N, H, L, S, E, C, k)) Q = torch.randn(N, H, L, E).to(self.device) K = torch.randn(N, H, S, E).to(self.device) lengths = torch.full((N,), L, dtype=torch.int32).to(self.device) groups, counts = cluster_queries(Q, lengths, C, I, B) sorted_g, sorted_gi = torch.sort(groups.view(N*H, -1), dim=-1) sorted_rev_gi = torch.argsort(sorted_gi, dim=-1) q_offset = torch.arange(N*H, device=Q.device).unsqueeze(-1) * L q_flat = (sorted_gi + q_offset).reshape(-1) s_queries = Q.reshape(-1, E).index_select( 0, q_flat).view(N, H, L, E) Q_grouped = aggregate( s_queries, sorted_g.view(N, H, L), 1/counts.float() ) QK = torch.einsum("nhle,nhse->nhls", Q_grouped, K) _, topk = torch.topk(QK, k, dim=-1) topk = topk.contiguous() topk_broadcast = broadcast( topk.float(), groups, torch.ones_like(counts, dtype=torch.float32), torch.zeros((N, H, L, k), device=Q.device) ) weights_sorted = torch.rand( N, H, L, k).to(self.device).requires_grad_(True) weights_sorted.retain_grad() q_rev_flat = (sorted_rev_gi + q_offset).reshape(-1) weights = torch.clone( weights_sorted.reshape(-1, k).index_select( 0, q_rev_flat).view(N, H, L, k) ) weights.retain_grad() values = torch.randn(N, H, S, E).to(self.device).requires_grad_(True) self._zero_grad(weights, values) values_selected = values[ torch.arange(N).view(N, 1, 1, 1).to(self.device), torch.arange(H).view(1, H, 1, 1).to(self.device), topk_broadcast.long() ] output = (weights.unsqueeze(-1)*values_selected).sum(-2) output.sum().backward() grad = [torch.clone(weights.grad), torch.clone(values.grad)] self._zero_grad(weights_sorted, values) self._zero_grad(weights, values) output_hat_sorted = clustered_sparse_weighted_average( weights_sorted, values, topk, groups, counts ) output_hat = output_hat_sorted.reshape(-1, E).index_select( 0, q_rev_flat).view(N, H, L, E) self.assertLess( torch.abs(output - output_hat).max(), 1e-4 ) output_hat.sum().backward() weights_grad_sorted = torch.clone(weights_sorted.grad) weights_grad = torch.clone( weights_grad_sorted.reshape(-1, k).index_select( 0, q_rev_flat).view(N, H, L, k) ) grad_hat = [weights_grad, torch.clone(values.grad)] for g1, g2 in zip(grad, grad_hat): self.assertLess( torch.abs(g1 - g2).max(), 1e-3 ) def test_forward(self): N = 6 H = 5 L = 100 S = 100 E = 32 C = 10 I = 10 B = 32 k = 5 for exp in range(30): C = np.random.randint(10, 500) L = np.random.randint(C, 2000) E = np.random.randint(10, 128) S = np.random.randint(100, 1000) k = np.random.randint(10, 64) if os.getenv("VERBOSE_TESTS", ""): print(("Testing: N H L S E C k: " "{} {} {} {} {} {} {}").format(N, H, 
L, S, E, C, k)) Q = torch.randn(N, H, L, E).to(self.device) K = torch.randn(N, H, S, E).to(self.device) lengths = torch.full((N,), L, dtype=torch.int32).to(self.device) groups, counts = cluster_queries(Q, lengths, C, I, B) sorted_g, sorted_gi = torch.sort(groups.view(N*H, -1), dim=-1) sorted_rev_gi = torch.argsort(sorted_gi, dim=-1) q_offset = torch.arange(N*H, device=Q.device).unsqueeze(-1) * L q_flat = (sorted_gi + q_offset).reshape(-1) s_queries = Q.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E) Q_grouped = aggregate( s_queries, sorted_g.view(N, H, L), 1/counts.float() ) QK = torch.einsum("nhle,nhse->nhls", Q_grouped, K) _, topk = torch.topk(QK, k, dim=-1) topk = topk.contiguous() topk_broadcast = broadcast( topk.float(), groups, torch.ones_like(counts, dtype=torch.float32), torch.zeros((N, H, L, k), device=Q.device) ) weights_sorted = clustered_sparse_dot_product( s_queries, K, topk, groups, counts, lengths ) weights = torch.softmax(weights_sorted, dim=-1) q_rev_flat = (sorted_rev_gi + q_offset).reshape(-1) weights = weights_sorted.reshape(-1, k).index_select( 0, q_rev_flat).view(N, H, L, k) values = torch.randn(N, H, S, E).to(self.device).requires_grad_(True) values_selected = values[ torch.arange(N).view(N, 1, 1, 1).to(self.device), torch.arange(H).view(1, H, 1, 1).to(self.device), topk_broadcast.long() ] output = (weights.unsqueeze(-1)*values_selected).sum(-2) output_hat_sorted = clustered_sparse_weighted_average( weights_sorted, values, topk, groups, counts ) output_hat = output_hat_sorted.reshape(-1, E).index_select( 0, q_rev_flat).view(N, H, L, E) self.assertLess( torch.abs(output_hat - output).max(), 1e-3 ) @unittest.skipUnless(os.getenv("BENCHMARK_TESTS", ""), "no benchmarks") def test_small_forward(self): N = 12 H = 8 L = 2000 S = 2000 E = 32 k = 32 C = 100 I = 10 B = 32 Q = torch.randn(N, H, L, E).to(self.device) K = torch.randn(N, H, S, E).to(self.device) lengths = torch.full((N,), L, dtype=torch.int32).to(self.device) groups, counts = cluster_queries(Q, lengths, C, I, B) sorted_g, sorted_gi = torch.sort(groups.view(N*H, -1), dim=-1) sorted_rev_gi = torch.argsort(sorted_gi, dim=-1) q_offset = torch.arange(N*H, device=Q.device).unsqueeze(-1) * L q_flat = (sorted_gi + q_offset).reshape(-1) s_queries = Q.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E) Q_grouped = aggregate( s_queries, sorted_g.view(N, H, L), 1/counts.float() ) QK = torch.einsum("nhle,nhse->nhls", Q_grouped, K) _, topk = torch.topk(QK, k, dim=-1) topk = topk.contiguous() topk_broadcast = broadcast( topk.float(), groups, torch.ones_like(counts, dtype=torch.float32), torch.zeros((N, H, L, k), device=Q.device) ) weights_sorted = clustered_sparse_dot_product( s_queries, K, topk, groups, counts, lengths ) q_rev_flat = (sorted_rev_gi + q_offset).reshape(-1) weights = weights_sorted.reshape(-1, k).index_select( 0, q_rev_flat).view(N, H, L, k) values = torch.randn(N, H, S, E).to(self.device) for i in range(2000): output_hat = clustered_sparse_weighted_average( weights_sorted, values, topk, groups, counts ) s = torch.cuda.Event(enable_timing=True) e = torch.cuda.Event(enable_timing=True) s.record() output_hat = clustered_sparse_weighted_average( weights, values, topk, groups, counts ) e.record() torch.cuda.synchronize() t_sparse = s.elapsed_time(e) print('T_sparse Forward:{}'.format(t_sparse)) @unittest.skipUnless(os.getenv("BENCHMARK_TESTS", ""), "no benchmarks") def test_small_backward(self): N = 12 H = 8 L = 1024 S = 1024 E = 64 k = 32 C = 100 I = 10 B = 32 Q = torch.randn(N, H, L, E).to(self.device) 
K = torch.randn(N, H, S, E).to(self.device) lengths = torch.full((N,), L, dtype=torch.int32).to(self.device) groups, counts = cluster_queries(Q, lengths, C, I, B) sorted_g, sorted_gi = torch.sort(groups.view(N*H, -1), dim=-1) sorted_rev_gi = torch.argsort(sorted_gi, dim=-1) q_offset = torch.arange(N*H, device=Q.device).unsqueeze(-1) * L q_flat = (sorted_gi + q_offset).reshape(-1) s_queries = Q.reshape(-1, E).index_select(0, q_flat).view(N, H, L, E) Q_grouped = aggregate( s_queries, sorted_g.view(N, H, L), 1/counts.float() ) QK = torch.einsum("nhle,nhse->nhls", Q_grouped, K) _, topk = torch.topk(QK, k, dim=-1) topk = topk.contiguous() topk_broadcast = broadcast( topk.float(), groups, torch.ones_like(counts, dtype=torch.float32), torch.zeros((N, H, L, k), device=Q.device) ) weights = torch.rand(N, H, L, k).to(self.device).requires_grad_(True) values = torch.randn(N, H, S, E).to(self.device).requires_grad_(True) for i in range(2000): output_hat = clustered_sparse_weighted_average( weights, values, topk, groups, counts ) self._zero_grad(weights, values) s = torch.cuda.Event(enable_timing=True) e = torch.cuda.Event(enable_timing=True) s.record() output_hat.sum().backward() e.record() torch.cuda.synchronize() t_sparse = s.elapsed_time(e) print('T_sparse Backward:{}'.format(t_sparse)) if __name__ == "__main__": unittest.main()
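The tests above all go through the cluster_queries helper defined at the top of the module; the following is a minimal hedged sketch of calling it on its own, assuming a CUDA device and the fast_transformers extensions just as the tests do, with toy sizes chosen purely for illustration.

import torch

# Assumed toy sizes; any values with L >= C behave the same way.
N, H, L, E = 2, 4, 256, 32      # batch, heads, queries, feature size
C, I, B = 16, 10, 32            # clusters, k-means iterations, hash bits

if torch.cuda.is_available():
    Q = torch.randn(N, H, L, E, device="cuda")
    lengths = torch.full((N,), L, dtype=torch.int32, device="cuda")

    groups, counts = cluster_queries(Q, lengths, C, I, B)
    # groups holds a cluster id per query (N, H, L); counts holds per-cluster sizes.
    print(groups.shape, counts.shape)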
/** * Copyright 2015-2017 DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "php_driver.h" #include "php_driver_globals.h" #include "php_driver_types.h" #include "util/future.h" #include "util/ref.h" zend_class_entry *php_driver_default_cluster_ce = NULL; static void free_session(void *session) { cass_session_free((CassSession*) session); } PHP_METHOD(DefaultCluster, connect) { char *keyspace = NULL; php5to7_size keyspace_len; zval *timeout = NULL; php_driver_cluster *self = NULL; php_driver_session *session = NULL; CassFuture *future = NULL; char *hash_key; php5to7_size hash_key_len = 0; php_driver_psession *psession; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sz", &keyspace, &keyspace_len, &timeout) == FAILURE) { return; } self = PHP_DRIVER_GET_CLUSTER(getThis()); object_init_ex(return_value, php_driver_default_session_ce); session = PHP_DRIVER_GET_SESSION(return_value); session->default_consistency = self->default_consistency; session->default_page_size = self->default_page_size; session->persist = self->persist; if (!PHP5TO7_ZVAL_IS_UNDEF(session->default_timeout)) { PHP5TO7_ZVAL_COPY(PHP5TO7_ZVAL_MAYBE_P(session->default_timeout), PHP5TO7_ZVAL_MAYBE_P(self->default_timeout)); } if (session->persist) { php5to7_zend_resource_le *le; hash_key_len = spprintf(&hash_key, 0, "%s:session:%s", self->hash_key, SAFE_STR(keyspace)); if (PHP5TO7_ZEND_HASH_FIND(&EG(persistent_list), hash_key, hash_key_len + 1, le) && Z_RES_P(le)->type == php_le_php_driver_session()) { psession = (php_driver_psession *) Z_RES_P(le)->ptr; session->session = php_driver_add_ref(psession->session); future = psession->future; } } if (future == NULL) { php5to7_zend_resource_le resource; session->session = php_driver_new_peref(cass_session_new(), free_session, 1); if (keyspace) { future = cass_session_connect_keyspace((CassSession *) session->session->data, self->cluster, keyspace); } else { future = cass_session_connect((CassSession *) session->session->data, self->cluster); } if (session->persist) { psession = (php_driver_psession *) pecalloc(1, sizeof(php_driver_psession), 1); psession->session = php_driver_add_ref(session->session); psession->future = future; #if PHP_MAJOR_VERSION >= 7 ZVAL_NEW_PERSISTENT_RES(&resource, 0, psession, php_le_php_driver_session()); PHP5TO7_ZEND_HASH_UPDATE(&EG(persistent_list), hash_key, hash_key_len + 1, &resource, sizeof(php5to7_zend_resource_le)); PHP_DRIVER_G(persistent_sessions)++; #else resource.type = php_le_php_driver_session(); resource.ptr = psession; PHP5TO7_ZEND_HASH_UPDATE(&EG(persistent_list), hash_key, hash_key_len + 1, resource, sizeof(php5to7_zend_resource_le)); PHP_DRIVER_G(persistent_sessions)++; #endif } } if (php_driver_future_wait_timed(future, timeout TSRMLS_CC) == FAILURE) { if (session->persist) { efree(hash_key); } else { cass_future_free(future); } return; } if (php_driver_future_is_error(future TSRMLS_CC) == FAILURE) { if (session->persist) { (void) PHP5TO7_ZEND_HASH_DEL(&EG(persistent_list), hash_key, hash_key_len + 1); efree(hash_key); } 
else { cass_future_free(future); } return; } if (session->persist) efree(hash_key); } PHP_METHOD(DefaultCluster, connectAsync) { char *hash_key = NULL; php5to7_size hash_key_len = 0; char *keyspace = NULL; php5to7_size keyspace_len; php_driver_cluster *self = NULL; php_driver_future_session *future = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &keyspace, &keyspace_len) == FAILURE) { return; } self = PHP_DRIVER_GET_CLUSTER(getThis()); object_init_ex(return_value, php_driver_future_session_ce); future = PHP_DRIVER_GET_FUTURE_SESSION(return_value); future->persist = self->persist; if (self->persist) { php5to7_zend_resource_le *le; hash_key_len = spprintf(&hash_key, 0, "%s:session:%s", self->hash_key, SAFE_STR(keyspace)); future->hash_key = hash_key; future->hash_key_len = hash_key_len; if (PHP5TO7_ZEND_HASH_FIND(&EG(persistent_list), hash_key, hash_key_len + 1, le) && Z_RES_P(le)->type == php_le_php_driver_session()) { php_driver_psession *psession = (php_driver_psession *) Z_RES_P(le)->ptr; future->session = php_driver_add_ref(psession->session); future->future = psession->future; return; } } future->session = php_driver_new_peref(cass_session_new(), free_session, 1); if (keyspace) { future->future = cass_session_connect_keyspace((CassSession *) future->session->data, self->cluster, keyspace); } else { future->future = cass_session_connect((CassSession *) future->session->data, self->cluster); } if (self->persist) { php5to7_zend_resource_le resource; php_driver_psession *psession = (php_driver_psession *) pecalloc(1, sizeof(php_driver_psession), 1); psession->session = php_driver_add_ref(future->session); psession->future = future->future; #if PHP_MAJOR_VERSION >= 7 ZVAL_NEW_PERSISTENT_RES(&resource, 0, psession, php_le_php_driver_session()); PHP5TO7_ZEND_HASH_UPDATE(&EG(persistent_list), hash_key, hash_key_len + 1, &resource, sizeof(php5to7_zend_resource_le)); PHP_DRIVER_G(persistent_sessions)++; #else resource.type = php_le_php_driver_session(); resource.ptr = psession; PHP5TO7_ZEND_HASH_UPDATE(&EG(persistent_list), hash_key, hash_key_len + 1, resource, sizeof(php5to7_zend_resource_le)); PHP_DRIVER_G(persistent_sessions)++; #endif } } ZEND_BEGIN_ARG_INFO_EX(arginfo_connect, 0, ZEND_RETURN_VALUE, 0) ZEND_ARG_INFO(0, keyspace) ZEND_ARG_INFO(0, timeout) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_connectAsync, 0, ZEND_RETURN_VALUE, 0) ZEND_ARG_INFO(0, keyspace) ZEND_END_ARG_INFO() static zend_function_entry php_driver_default_cluster_methods[] = { PHP_ME(DefaultCluster, connect, arginfo_connect, ZEND_ACC_PUBLIC) PHP_ME(DefaultCluster, connectAsync, arginfo_connectAsync, ZEND_ACC_PUBLIC) PHP_FE_END }; static zend_object_handlers php_driver_default_cluster_handlers; static HashTable * php_driver_default_cluster_properties(zval *object TSRMLS_DC) { HashTable *props = zend_std_get_properties(object TSRMLS_CC); return props; } static int php_driver_default_cluster_compare(zval *obj1, zval *obj2 TSRMLS_DC) { if (Z_OBJCE_P(obj1) != Z_OBJCE_P(obj2)) return 1; /* different classes */ return Z_OBJ_HANDLE_P(obj1) != Z_OBJ_HANDLE_P(obj1); } static void php_driver_default_cluster_free(php5to7_zend_object_free *object TSRMLS_DC) { php_driver_cluster *self = PHP5TO7_ZEND_OBJECT_GET(cluster, object); if (self->persist) { efree(self->hash_key); } else { if (self->cluster) { cass_cluster_free(self->cluster); } } PHP5TO7_ZVAL_MAYBE_DESTROY(self->default_timeout); zend_object_std_dtor(&self->zval TSRMLS_CC); PHP5TO7_MAYBE_EFREE(self); } static php5to7_zend_object 
php_driver_default_cluster_new(zend_class_entry *ce TSRMLS_DC) { php_driver_cluster *self = PHP5TO7_ZEND_OBJECT_ECALLOC(cluster, ce); self->cluster = NULL; self->default_consistency = PHP_DRIVER_DEFAULT_CONSISTENCY; self->default_page_size = 5000; self->persist = 0; self->hash_key = NULL; PHP5TO7_ZVAL_UNDEF(self->default_timeout); PHP5TO7_ZEND_OBJECT_INIT_EX(cluster, default_cluster, self, ce); } void php_driver_define_DefaultCluster(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, PHP_DRIVER_NAMESPACE "\\DefaultCluster", php_driver_default_cluster_methods); php_driver_default_cluster_ce = zend_register_internal_class(&ce TSRMLS_CC); zend_class_implements(php_driver_default_cluster_ce TSRMLS_CC, 1, php_driver_cluster_ce); php_driver_default_cluster_ce->ce_flags |= PHP5TO7_ZEND_ACC_FINAL; php_driver_default_cluster_ce->create_object = php_driver_default_cluster_new; memcpy(&php_driver_default_cluster_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); php_driver_default_cluster_handlers.get_properties = php_driver_default_cluster_properties; php_driver_default_cluster_handlers.compare_objects = php_driver_default_cluster_compare; }
package com.alibaba.alink.common.pyrunner.bridge;

import com.alibaba.alink.common.pyrunner.bridge.BasePythonBridge;
import com.alibaba.alink.common.pyrunner.bridge.DedicatedPythonBridge;
import com.alibaba.alink.common.pyrunner.bridge.SharedPythonBridge;
import com.alibaba.alink.common.pyrunner.PyMainHandle;
import com.alibaba.alink.testutil.categories.PyTest;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;

public class PythonBridgeTest {

    @Category(PyTest.class)
    @Test
    public void testSharedPythonBridge() {
        BasePythonBridge pythonBridge = SharedPythonBridge.inst();
        pythonBridge.open("A", "python3", 0, 0, 3000, 3000, true);
        pythonBridge.open("B", "python3", 0, 0, 3000, 3000, true);
        PyMainHandle pyMain = pythonBridge.app();
        Assert.assertTrue(pyMain.check());
        pythonBridge.close("A");
        pythonBridge.close("B");
    }

    @Category(PyTest.class)
    @Test
    public void testDedicatedPythonBridge() {
        BasePythonBridge pythonBridge = DedicatedPythonBridge.inst();
        pythonBridge.open("A", "python3", 0, 0, 3000, 3000, true);
        pythonBridge.open("B", "python3", 0, 0, 3000, 3000, true);
        PyMainHandle pyMain = pythonBridge.app();
        Assert.assertTrue(pyMain.check());
        pythonBridge.close("A");
        pythonBridge.close("B");
    }
}
import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange, repeat from perceiver_pytorch.perceiver_pytorch import exists, default, cache_fn, fourier_encode, PreNorm, FeedForward, Attention # latent mixer def Mixer(seq_len, mult = 4, dropout = 0.): return nn.Sequential( nn.Conv1d(seq_len, seq_len * mult, 1), nn.GELU(), nn.Dropout(dropout), nn.Conv1d(seq_len * mult, seq_len, 1) ) # main class class Perceiver(nn.Module): def __init__( self, *, num_freq_bands, depth, max_freq, freq_base = 2, input_channels = 3, input_axis = 2, num_latents = 512, latent_dim = 512, cross_heads = 1, latent_heads = 8, cross_dim_head = 64, latent_dim_head = 64, num_classes = 1000, attn_dropout = 0., ff_dropout = 0., weight_tie_layers = False, **kwargs ): super().__init__() self.input_axis = input_axis self.max_freq = max_freq self.num_freq_bands = num_freq_bands self.freq_base = freq_base input_dim = input_axis * ((num_freq_bands * 2) + 1) + input_channels self.latents = nn.Parameter(torch.randn(num_latents, latent_dim)) get_cross_attn = lambda: PreNorm(latent_dim, Attention(latent_dim, input_dim, heads = cross_heads, dim_head = cross_dim_head, dropout = attn_dropout), context_dim = input_dim) get_latent_attn = lambda: PreNorm(latent_dim, Mixer(num_latents, dropout = ff_dropout)) get_cross_ff = lambda: PreNorm(latent_dim, FeedForward(latent_dim, dropout = ff_dropout)) get_latent_ff = lambda: PreNorm(latent_dim, FeedForward(latent_dim, dropout = ff_dropout)) get_cross_attn, get_cross_ff, get_latent_attn, get_latent_ff = map(cache_fn, (get_cross_attn, get_cross_ff, get_latent_attn, get_latent_ff)) self.layers = nn.ModuleList([]) for i in range(depth): should_cache = i > 0 and weight_tie_layers cache_args = {'_cache': should_cache} self.layers.append(nn.ModuleList([ get_cross_attn(**cache_args), get_cross_ff(**cache_args), get_latent_attn(**cache_args), get_latent_ff(**cache_args) ])) self.to_logits = nn.Sequential( nn.LayerNorm(latent_dim), nn.Linear(latent_dim, num_classes) ) def forward(self, data, mask = None): b, *axis, _, device = *data.shape, data.device assert len(axis) == self.input_axis, 'input data must have the right number of axis' # calculate fourier encoded positions in the range of [-1, 1], for all axis axis_pos = list(map(lambda size: torch.linspace(-1., 1., steps = size, device = device), axis)) pos = torch.stack(torch.meshgrid(*axis_pos, indexing = 'ij'), dim = -1) enc_pos = fourier_encode(pos, self.max_freq, self.num_freq_bands, base = self.freq_base) enc_pos = rearrange(enc_pos, '... n d -> ... (n d)') enc_pos = repeat(enc_pos, '... -> b ...', b = b) # concat to channels of data and flatten axis data = torch.cat((data, enc_pos), dim = -1) data = rearrange(data, 'b ... d -> b (...) d') x = repeat(self.latents, 'n d -> b n d', b = b) for cross_attn, cross_ff, latent_attn, latent_ff in self.layers: x = cross_attn(x, context = data, mask = mask) + x x = cross_ff(x) + x x = latent_attn(x) + x x = latent_ff(x) + x x = x.mean(dim = -2) return self.to_logits(x)
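A minimal usage sketch for the mixer-variant Perceiver defined above: it instantiates the model and pushes one image-shaped tensor through it. All hyperparameter values below are assumed examples, not defaults taken from the repository.

import torch

model = Perceiver(
    num_freq_bands=6,     # Fourier feature bands per input axis
    depth=4,              # number of (cross-attention, mixer) blocks
    max_freq=10.,         # highest frequency used in the positional encoding
    input_channels=3,     # e.g. RGB images
    input_axis=2,         # images have two positional axes
    num_latents=128,
    latent_dim=256,
    num_classes=1000,
)

img = torch.randn(1, 64, 64, 3)   # (batch, height, width, channels)
logits = model(img)               # -> torch.Size([1, 1000])
print(logits.shape)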
/** * The MIT License * Copyright © 2010 JmxTrans team * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package com.googlecode.jmxtrans.monitoring; import javax.management.ObjectName; public interface ThreadPoolExecutorMXBean { boolean allowsCoreThreadTimeOut(); int getActiveCount(); long getCompletedTaskCount(); int getCorePoolSize(); long getKeepAliveTimeSeconds(); int getLargestPoolSize(); int getMaximumPoolSize(); int getPoolSize(); long getTaskCount(); boolean isShutdown(); boolean isTerminated(); boolean isTerminating(); int workQueueRemainingCapacity(); int workQueueSize(); ObjectName getObjectName(); }
package io.quarkus.it.rest;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;

@Path("/envelope")
public class EnvelopeClassResource {

    @Path("/payload")
    @GET
    @Produces("application/json")
    public EnvelopeClass<PayloadClass> payloadClass() {
        return new EnvelopeClass<>(new PayloadClass("hello"));
    }
}
#!/usr/bin/env python3 import os import sys import json import socket import socketserver from http import server from pprint import pprint from argparse import ArgumentParser import typing as tp RENDERERS: tp.List[tp.Callable] = [] def renderer(f): """Adds decorated function to renderers list.""" RENDERERS.append(f) @renderer def prompt(config): return ["$PROMPT = {!r}".format(config["prompt"])] @renderer def colors(config): style = config["colors"] if style == "default": return [] return ["$XONSH_COLOR_STYLE = {!r}".format(style)] @renderer def xontribs(config): xtribs = config["xontribs"] if not xtribs: return [] return ["xontrib load " + " ".join(xtribs)] def config_to_xonsh( config, prefix="# XONSH WEBCONFIG START", suffix="# XONSH WEBCONFIG END" ): """Turns config dict into xonsh code (str).""" lines = [prefix] for func in RENDERERS: lines.extend(func(config)) lines.append(suffix) return "\n".join(lines) def insert_into_xonshrc( config, xonshrc="~/.xonshrc", prefix="# XONSH WEBCONFIG START", suffix="# XONSH WEBCONFIG END", ): """Places a config dict into the xonshrc.""" # get current contents fname = os.path.expanduser(xonshrc) if os.path.isfile(fname): with open(fname, "r") as f: s = f.read() before, _, s = s.partition(prefix) _, _, after = s.partition(suffix) else: before = after = "" dname = os.path.dirname(fname) if dname: os.makedirs(dname, exist_ok=True) # compute new values new = config_to_xonsh(config, prefix=prefix, suffix=suffix) # write out the file with open(fname, "w", encoding="utf-8") as f: f.write(before + new + after) return fname class XonshConfigHTTPRequestHandler(server.SimpleHTTPRequestHandler): def _set_headers(self): self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() def do_POST(self): """Reads post request body""" self._set_headers() content_len = int(self.headers.get("content-length", 0)) post_body = self.rfile.read(content_len) config = json.loads(post_body) print("Web Config Values:") pprint(config) fname = insert_into_xonshrc(config) print("Wrote out to " + fname) self.wfile.write(b"received post request:<br>" + post_body) def make_parser(): p = ArgumentParser("xonfig web") p.add_argument( "--no-browser", action="store_false", dest="browser", default=True, help="don't open browser", ) return p def main(args=None): p = make_parser() ns = p.parse_args(args=args) webconfig_dir = os.path.dirname(__file__) if webconfig_dir: os.chdir(webconfig_dir) port = 8421 Handler = XonshConfigHTTPRequestHandler while port <= 9310: try: with socketserver.TCPServer(("", port), Handler) as httpd: url = "http://localhost:{0}".format(port) print("Web config started at '{0}'. Hit Crtl+C to stop.".format(url)) if ns.browser: import webbrowser webbrowser.open(url) httpd.serve_forever() break except socket.error: type, value = sys.exc_info()[:2] if "Address already in use" not in str(value): raise except KeyboardInterrupt: break port += 1 if __name__ == "__main__": main()
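The two pure helpers above (config_to_xonsh and insert_into_xonshrc) can be exercised without starting the HTTP server. Here is a small hedged sketch; the concrete prompt string, color style, and xontrib name are assumed example values, and the target rc path is deliberately a throwaway file rather than ~/.xonshrc.

config = {
    "prompt": "{user}@{hostname} {cwd} $ ",   # rendered as a $PROMPT assignment
    "colors": "default",                      # "default" makes the colors renderer emit nothing
    "xontribs": ["argcomplete"],              # rendered as an `xontrib load` line
}

snippet = config_to_xonsh(config)
print(snippet)   # lines wrapped in the "# XONSH WEBCONFIG START/END" markers

path = insert_into_xonshrc(config, xonshrc="/tmp/test_xonshrc")
print("wrote", path)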
<reponame>zipated/src<filename>services/resource_coordinator/resource_coordinator_service_unittest.cc // Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <memory> #include "base/macros.h" #include "base/run_loop.h" #include "mojo/public/cpp/bindings/binding.h" #include "services/resource_coordinator/public/cpp/frame_resource_coordinator.h" #include "services/resource_coordinator/public/cpp/page_resource_coordinator.h" #include "services/resource_coordinator/public/cpp/process_resource_coordinator.h" #include "services/resource_coordinator/public/cpp/system_resource_coordinator.h" #include "services/resource_coordinator/public/mojom/coordination_unit_provider.mojom.h" #include "services/resource_coordinator/public/mojom/service_constants.mojom.h" #include "services/service_manager/public/cpp/service.h" #include "services/service_manager/public/cpp/service_test.h" namespace resource_coordinator { class ResourceCoordinatorTest : public service_manager::test::ServiceTest { public: ResourceCoordinatorTest() : service_manager::test::ServiceTest("resource_coordinator_unittests") {} ~ResourceCoordinatorTest() override {} void GetIDCallback(const CoordinationUnitID& cu_id) { loop_->Quit(); } // Given a CU, tests that it works by invoking GetID and waiting for the // response. This test will hang (and eventually fail) if the response does // not come back from the remote endpoint. template <typename CoordinationUnitPtrType> void TestCUImpl(CoordinationUnitPtrType cu) { base::RunLoop loop; loop_ = &loop; cu->GetID(base::BindOnce(&ResourceCoordinatorTest::GetIDCallback, base::Unretained(this))); loop.Run(); loop_ = nullptr; } // Variant that works with mojo interface pointers. template <typename CoordinationUnitPtrType> void TestCU(CoordinationUnitPtrType& cu) { TestCUImpl<CoordinationUnitPtrType&>(cu); } // Variant that works with pointers to FooResourceCoordinator wrappers. template <typename CoordinationUnitPtrType> void TestCU(CoordinationUnitPtrType* cu) { TestCUImpl<CoordinationUnitPtrType*>(cu); } protected: void SetUp() override { service_manager::test::ServiceTest::SetUp(); connector()->StartService(mojom::kServiceName); } private: base::RunLoop* loop_ = nullptr; DISALLOW_COPY_AND_ASSIGN(ResourceCoordinatorTest); }; TEST_F(ResourceCoordinatorTest, ResourceCoordinatorInstantiate) { // Get the CU provider interface. mojom::CoordinationUnitProviderPtr provider; connector()->BindInterface(mojom::kServiceName, mojo::MakeRequest(&provider)); // Create and test a dummy FrameCU. CoordinationUnitID frame_id(CoordinationUnitType::kFrame, ""); mojom::FrameCoordinationUnitPtr frame_cu; provider->CreateFrameCoordinationUnit(mojo::MakeRequest(&frame_cu), frame_id); TestCU(frame_cu); // Create and test a dummy PageCU. CoordinationUnitID page_id(CoordinationUnitType::kPage, ""); mojom::PageCoordinationUnitPtr page_cu; provider->CreatePageCoordinationUnit(mojo::MakeRequest(&page_cu), page_id); TestCU(page_cu); // Create and test a dummy SystemCU. mojom::SystemCoordinationUnitPtr system_cu; provider->GetSystemCoordinationUnit(mojo::MakeRequest(&system_cu)); TestCU(system_cu); // Create and test a dummy ProcessCU. 
CoordinationUnitID process_id(CoordinationUnitType::kProcess, ""); mojom::ProcessCoordinationUnitPtr process_cu; provider->CreateProcessCoordinationUnit(mojo::MakeRequest(&process_cu), process_id); TestCU(process_cu); // Also test the convenience headers for creating and communicating with CUs. FrameResourceCoordinator frame_rc(connector()); TestCU(&frame_rc); PageResourceCoordinator page_rc(connector()); TestCU(&page_rc); ProcessResourceCoordinator process_rc(connector()); TestCU(&process_rc); SystemResourceCoordinator system_rc(connector()); TestCU(&system_rc); } } // namespace resource_coordinator
1,352
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from knack.log import get_logger logger = get_logger(__name__) # region Private Link Scope def show_private_link_scope(client, resource_group_name, scope_name): return client.get(resource_group_name=resource_group_name, scope_name=scope_name) def delete_private_link_scope(client, resource_group_name, scope_name): return client.begin_delete(resource_group_name=resource_group_name, scope_name=scope_name) def list_private_link_scope(client, resource_group_name=None): if not resource_group_name: return client.list() return client.list_by_resource_group(resource_group_name=resource_group_name) def create_private_link_scope(client, resource_group_name, scope_name, location='Global', tags=None): from azure.mgmt.monitor.models import AzureMonitorPrivateLinkScope private_link_scope = AzureMonitorPrivateLinkScope(location=location, tags=tags) return client.create_or_update(resource_group_name=resource_group_name, scope_name=scope_name, azure_monitor_private_link_scope_payload=private_link_scope) def update_private_link_scope(client, resource_group_name, scope_name, tags): from azure.mgmt.monitor.models import TagsResource tags = TagsResource(tags=tags) return client.update_tags(resource_group_name=resource_group_name, scope_name=scope_name, private_link_scope_tags=tags) # endregion # region Private Link Scope Resource def show_private_link_scope_resource(client, resource_group_name, scope_name, resource_name): return client.get(resource_group_name=resource_group_name, scope_name=scope_name, name=resource_name) def delete_private_link_scope_resource(client, resource_group_name, scope_name, resource_name): return client.begin_delete(resource_group_name=resource_group_name, scope_name=scope_name, name=resource_name) def list_private_link_scope_resource(client, resource_group_name, scope_name): return client.list_by_private_link_scope(resource_group_name=resource_group_name, scope_name=scope_name) def create_private_link_scope_resource(client, resource_group_name, scope_name, resource_name, linked_resource_id): from azure.mgmt.monitor.models import ScopedResource scoped_resource = ScopedResource(linked_resource_id=linked_resource_id) return client.begin_create_or_update(resource_group_name=resource_group_name, scope_name=scope_name, name=resource_name, parameters=scoped_resource) # endregion # region Private Link Resource def list_private_link_resource(client, resource_group_name, scope_name): return client.list_by_private_link_scope(resource_group_name=resource_group_name, scope_name=scope_name) def show_private_link_resource(client, resource_group_name, scope_name, group_name): return client.get(resource_group_name=resource_group_name, scope_name=scope_name, group_name=group_name) # endregion # region Private Endpoint Connection def show_private_endpoint_connection(client, resource_group_name, scope_name, private_endpoint_connection_name): return client.get(resource_group_name=resource_group_name, scope_name=scope_name, private_endpoint_connection_name=private_endpoint_connection_name) def delete_private_endpoint_connection(client, resource_group_name, scope_name, private_endpoint_connection_name): return 
client.begin_delete(resource_group_name=resource_group_name, scope_name=scope_name, private_endpoint_connection_name=private_endpoint_connection_name) def list_private_endpoint_connection(client, resource_group_name, scope_name): return client.list_by_private_link_scope(resource_group_name=resource_group_name, scope_name=scope_name) # pylint: disable=line-too-long, unused-argument def _update_private_endpoint_connection_status(cmd, client, resource_group_name, scope_name, private_endpoint_connection_name, is_approved=True, description=None): private_endpoint_connection = client.get(resource_group_name=resource_group_name, scope_name=scope_name, private_endpoint_connection_name=private_endpoint_connection_name) old_status = private_endpoint_connection.private_link_service_connection_state.status new_status = "Approved" if is_approved else "Rejected" if old_status == new_status: logger.warning('The status has been satisfied. Skip this command.') return None private_endpoint_connection.private_link_service_connection_state.status = new_status private_endpoint_connection.private_link_service_connection_state.description = description from azure.mgmt.monitor.models import PrivateEndpointConnection private_endpoint_connection = PrivateEndpointConnection(private_link_service_connection_state=private_endpoint_connection.private_link_service_connection_state) return client.begin_create_or_update(resource_group_name=resource_group_name, scope_name=scope_name, private_endpoint_connection_name=private_endpoint_connection_name, parameters=private_endpoint_connection) def approve_private_endpoint_connection(cmd, client, resource_group_name, scope_name, private_endpoint_connection_name, description=""): return _update_private_endpoint_connection_status( cmd, client, resource_group_name=resource_group_name, scope_name=scope_name, is_approved=True, private_endpoint_connection_name=private_endpoint_connection_name, description=description ) def reject_private_endpoint_connection(cmd, client, resource_group_name, scope_name, private_endpoint_connection_name, description=""): return _update_private_endpoint_connection_status( cmd, client, resource_group_name=resource_group_name, scope_name=scope_name, is_approved=False, private_endpoint_connection_name=private_endpoint_connection_name, description=description ) # endregion
<filename>src/ompl/base/samplers/informed/src/OrderedInfSampler.cpp<gh_stars>100-1000 /********************************************************************* * Software License Agreement (BSD License) * * Copyright (c) 2014, University of Toronto * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the University of Toronto nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. *********************************************************************/ /* Authors: <NAME> */ #include "ompl/base/samplers/informed/OrderedInfSampler.h" #include "ompl/base/OptimizationObjective.h" namespace ompl { namespace base { // The default rejection-sampling class: OrderedInfSampler::OrderedInfSampler(const InformedSamplerPtr &infSamplerPtr, unsigned int batchSize) : InformedSampler(infSamplerPtr->getProblemDefn(), infSamplerPtr->getMaxNumberOfIters()) , infSampler_(infSamplerPtr) , batchSize_(batchSize) , orderedSamples_([this](const State *lhs, const State *rhs) { return queueComparator(lhs, rhs); }) { } bool OrderedInfSampler::sampleUniform(State *statePtr, const Cost &maxCost) { // Variables // Whether a sampler has been found and returned bool found = false; // Repeat until a valid pointer is found while (!found) { // Check if the batch is empty if (orderedSamples_.empty()) { // It is, recreate: createBatch(maxCost); } // Does the front of the priority queue meet our requirement (as the requirement may have changed since // the batch was generated) if (InformedSampler::opt_->isCostBetterThan(InformedSampler::heuristicSolnCost(orderedSamples_.top()), maxCost)) { // Copy the front of the priority queue. 
InformedSampler::space_->copyState(statePtr, orderedSamples_.top()); // Free the pointer in the queue InformedSampler::space_->freeState(orderedSamples_.top()); // Pop it orderedSamples_.pop(); // And mark that we've found a sample found = true; } else { // It does not, clear the queue clearBatch(); } } return found; } bool OrderedInfSampler::sampleUniform(State *, const Cost &, const Cost &) { throw ompl::Exception("Not implemented"); return false; } bool OrderedInfSampler::hasInformedMeasure() const { return infSampler_->hasInformedMeasure(); } double OrderedInfSampler::getInformedMeasure(const Cost &currentCost) const { return infSampler_->getInformedMeasure(currentCost); } bool OrderedInfSampler::queueComparator(const State *a, const State *b) { return InformedSampler::opt_->isCostBetterThan(InformedSampler::heuristicSolnCost(b), InformedSampler::heuristicSolnCost(a)); } void OrderedInfSampler::createBatch(const Cost &maxCost) { // Allocate, create and store batchSize_ samples for (unsigned int i = 0u; i < batchSize_; ++i) { // Allocate a state pointer State *newStatePtr = InformedSampler::space_->allocState(); // Sample the state pointer using the wrapped sampler infSampler_->sampleUniform(newStatePtr, maxCost); // Store it into the queue orderedSamples_.push(newStatePtr); } } void OrderedInfSampler::createBatch(const Cost &, const Cost &) { throw ompl::Exception("Not implemented"); } void OrderedInfSampler::clearBatch() { // Iterate through the entire queue, removing the element and freeing it. while (!orderedSamples_.empty()) { // Free the front state InformedSampler::space_->freeState(orderedSamples_.top()); // Pop the front state orderedSamples_.pop(); } } }; // base }; // ompl
2,670
/* pmeth_lib.c */ /* Written by Dr <NAME> (<EMAIL>) for the OpenSSL * project 2006. */ /* ==================================================================== * Copyright (c) 2006 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * [email protected]. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by <NAME> * (<EMAIL>). This product includes software written by <NAME> (<EMAIL>). 
* */ #include "cryptlib.h" #include <openssl/objects.h> #include <openssl/evp.h> #ifndef OPENSSL_NO_ENGINE #include <openssl/engine.h> #endif #include "asn1_locl.h" #include "evp_locl.h" #ifdef OPENSSL_SYS_WINDOWS #include <stdio.h> #include <stdlib.h> #endif typedef int sk_cmp_fn_type(const char * const *a, const char * const *b); #ifdef __cplusplus extern "C" { #endif DECLARE_STACK_OF(EVP_PKEY_METHOD) STACK_OF(EVP_PKEY_METHOD) *app_pkey_methods = NULL; extern const EVP_PKEY_METHOD rsa_pkey_meth, dh_pkey_meth, dsa_pkey_meth; extern const EVP_PKEY_METHOD ec_pkey_meth, hmac_pkey_meth; static const EVP_PKEY_METHOD *standard_methods[] = { #ifndef OPENSSL_NO_RSA &rsa_pkey_meth, #endif #ifndef OPENSSL_NO_DH &dh_pkey_meth, #endif #ifndef OPENSSL_NO_DSA &dsa_pkey_meth, #endif #ifndef OPENSSL_NO_EC &ec_pkey_meth, #endif &hmac_pkey_meth, }; DECLARE_OBJ_BSEARCH_CMP_FN(const EVP_PKEY_METHOD *, const EVP_PKEY_METHOD *, pmeth); static int pmeth_cmp(const EVP_PKEY_METHOD * const *a, const EVP_PKEY_METHOD * const *b) { return ((*a)->pkey_id - (*b)->pkey_id); } IMPLEMENT_OBJ_BSEARCH_CMP_FN(const EVP_PKEY_METHOD *, const EVP_PKEY_METHOD *, pmeth); const EVP_PKEY_METHOD *EVP_PKEY_meth_find(int type) { EVP_PKEY_METHOD tmp; const EVP_PKEY_METHOD *t = &tmp, **ret; tmp.pkey_id = type; if (app_pkey_methods) { int idx; idx = sk_EVP_PKEY_METHOD_find(app_pkey_methods, &tmp); if (idx >= 0) return sk_EVP_PKEY_METHOD_value(app_pkey_methods, idx); } ret = OBJ_bsearch_pmeth(&t, standard_methods, sizeof(standard_methods)/sizeof(EVP_PKEY_METHOD *)); if (!ret || !*ret) return NULL; return *ret; } static EVP_PKEY_CTX *int_ctx_new(EVP_PKEY *pkey, ENGINE *e, int id) { EVP_PKEY_CTX *ret; const EVP_PKEY_METHOD *pmeth; if (id == -1) { if (!pkey || !pkey->ameth) return NULL; id = pkey->ameth->pkey_id; } #ifndef OPENSSL_NO_ENGINE if (pkey && pkey->engine) e = pkey->engine; /* Try to find an ENGINE which implements this method */ if (e) { if (!ENGINE_init(e)) { EVPerr(EVP_F_INT_CTX_NEW,ERR_R_ENGINE_LIB); return NULL; } } else e = ENGINE_get_pkey_meth_engine(id); /* If an ENGINE handled this method look it up. Othewise * use internal tables. 
*/ if (e) pmeth = ENGINE_get_pkey_meth(e, id); else #endif pmeth = EVP_PKEY_meth_find(id); if (pmeth == NULL) { EVPerr(EVP_F_INT_CTX_NEW,EVP_R_UNSUPPORTED_ALGORITHM); return NULL; } ret = (EVP_PKEY_CTX *)OPENSSL_malloc(sizeof(EVP_PKEY_CTX)); if (!ret) { #ifndef OPENSSL_NO_ENGINE if (e) ENGINE_finish(e); #endif EVPerr(EVP_F_INT_CTX_NEW,ERR_R_MALLOC_FAILURE); return NULL; } ret->engine = e; ret->pmeth = pmeth; ret->operation = EVP_PKEY_OP_UNDEFINED; ret->pkey = pkey; ret->peerkey = NULL; ret->pkey_gencb = 0; if (pkey) CRYPTO_add(&pkey->references,1,CRYPTO_LOCK_EVP_PKEY); ret->data = NULL; if (pmeth->init) { if (pmeth->init(ret) <= 0) { EVP_PKEY_CTX_free(ret); return NULL; } } return ret; } EVP_PKEY_METHOD* EVP_PKEY_meth_new(int id, int flags) { EVP_PKEY_METHOD *pmeth; pmeth = (EVP_PKEY_METHOD *)OPENSSL_malloc(sizeof(EVP_PKEY_METHOD)); if (!pmeth) return NULL; pmeth->pkey_id = id; pmeth->flags = flags | EVP_PKEY_FLAG_DYNAMIC; pmeth->init = 0; pmeth->copy = 0; pmeth->cleanup = 0; pmeth->paramgen_init = 0; pmeth->paramgen = 0; pmeth->keygen_init = 0; pmeth->keygen = 0; pmeth->sign_init = 0; pmeth->sign = 0; pmeth->verify_init = 0; pmeth->verify = 0; pmeth->verify_recover_init = 0; pmeth->verify_recover = 0; pmeth->signctx_init = 0; pmeth->signctx = 0; pmeth->verifyctx_init = 0; pmeth->verifyctx = 0; pmeth->encrypt_init = 0; pmeth->encrypt = 0; pmeth->decrypt_init = 0; pmeth->decrypt = 0; pmeth->derive_init = 0; pmeth->derive = 0; pmeth->ctrl = 0; pmeth->ctrl_str = 0; return pmeth; } void EVP_PKEY_meth_free(EVP_PKEY_METHOD *pmeth) { if (pmeth && (pmeth->flags & EVP_PKEY_FLAG_DYNAMIC)) OPENSSL_free(pmeth); } EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e) { return int_ctx_new(pkey, e, -1); } EVP_PKEY_CTX *EVP_PKEY_CTX_new_id(int id, ENGINE *e) { return int_ctx_new(NULL, e, id); } EVP_PKEY_CTX *EVP_PKEY_CTX_dup(EVP_PKEY_CTX *pctx) { EVP_PKEY_CTX *rctx; if (!pctx->pmeth || !pctx->pmeth->copy) return NULL; #ifndef OPENSSL_NO_ENGINE /* Make sure it's safe to copy a pkey context using an ENGINE */ if (pctx->engine && !ENGINE_init(pctx->engine)) { EVPerr(EVP_F_EVP_PKEY_CTX_DUP,ERR_R_ENGINE_LIB); return 0; } #endif rctx = (EVP_PKEY_CTX *)OPENSSL_malloc(sizeof(EVP_PKEY_CTX)); if (!rctx) return NULL; rctx->pmeth = pctx->pmeth; #ifndef OPENSSL_NO_ENGINE rctx->engine = pctx->engine; #endif if (pctx->pkey) CRYPTO_add(&pctx->pkey->references,1,CRYPTO_LOCK_EVP_PKEY); rctx->pkey = pctx->pkey; if (pctx->peerkey) CRYPTO_add(&pctx->peerkey->references,1,CRYPTO_LOCK_EVP_PKEY); rctx->peerkey = pctx->peerkey; rctx->data = NULL; rctx->app_data = NULL; rctx->operation = pctx->operation; if (pctx->pmeth->copy(rctx, pctx) > 0) return rctx; EVP_PKEY_CTX_free(rctx); return NULL; } int EVP_PKEY_meth_add0(const EVP_PKEY_METHOD *pmeth) { if (app_pkey_methods == NULL) { app_pkey_methods = sk_EVP_PKEY_METHOD_new(pmeth_cmp); if (!app_pkey_methods) return 0; } if (!sk_EVP_PKEY_METHOD_push(app_pkey_methods, pmeth)) return 0; sk_EVP_PKEY_METHOD_sort(app_pkey_methods); return 1; } void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx) { if (ctx == NULL) return; if (ctx->pmeth && ctx->pmeth->cleanup) ctx->pmeth->cleanup(ctx); if (ctx->pkey) EVP_PKEY_free(ctx->pkey); if (ctx->peerkey) EVP_PKEY_free(ctx->peerkey); #ifndef OPENSSL_NO_ENGINE if(ctx->engine) /* The EVP_PKEY_CTX we used belongs to an ENGINE, release the * functional reference we held for this reason. 
*/ ENGINE_finish(ctx->engine); #endif OPENSSL_free(ctx); } int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2) { int ret; if (!ctx || !ctx->pmeth || !ctx->pmeth->ctrl) { EVPerr(EVP_F_EVP_PKEY_CTX_CTRL, EVP_R_COMMAND_NOT_SUPPORTED); return -2; } if ((keytype != -1) && (ctx->pmeth->pkey_id != keytype)) return -1; if (ctx->operation == EVP_PKEY_OP_UNDEFINED) { EVPerr(EVP_F_EVP_PKEY_CTX_CTRL, EVP_R_NO_OPERATION_SET); return -1; } if ((optype != -1) && !(ctx->operation & optype)) { EVPerr(EVP_F_EVP_PKEY_CTX_CTRL, EVP_R_INVALID_OPERATION); return -1; } ret = ctx->pmeth->ctrl(ctx, cmd, p1, p2); if (ret == -2) EVPerr(EVP_F_EVP_PKEY_CTX_CTRL, EVP_R_COMMAND_NOT_SUPPORTED); return ret; } int EVP_PKEY_CTX_ctrl_str(EVP_PKEY_CTX *ctx, const char *name, const char *value) { if (!ctx || !ctx->pmeth || !ctx->pmeth->ctrl_str) { EVPerr(EVP_F_EVP_PKEY_CTX_CTRL_STR, EVP_R_COMMAND_NOT_SUPPORTED); return -2; } if (!TINYCLR_SSL_STRCMP(name, "digest")) { const EVP_MD *md; if (!value || !(md = EVP_get_digestbyname(value))) { EVPerr(EVP_F_EVP_PKEY_CTX_CTRL_STR, EVP_R_INVALID_DIGEST); return 0; } return EVP_PKEY_CTX_set_signature_md(ctx, md); } return ctx->pmeth->ctrl_str(ctx, name, value); } int EVP_PKEY_CTX_get_operation(EVP_PKEY_CTX *ctx) { return ctx->operation; } void EVP_PKEY_CTX_set0_keygen_info(EVP_PKEY_CTX *ctx, int *dat, int datlen) { ctx->keygen_info = dat; ctx->keygen_info_count = datlen; } void EVP_PKEY_CTX_set_data(EVP_PKEY_CTX *ctx, void *data) { ctx->data = data; } void *EVP_PKEY_CTX_get_data(EVP_PKEY_CTX *ctx) { return ctx->data; } EVP_PKEY *EVP_PKEY_CTX_get0_pkey(EVP_PKEY_CTX *ctx) { return ctx->pkey; } EVP_PKEY *EVP_PKEY_CTX_get0_peerkey(EVP_PKEY_CTX *ctx) { return ctx->peerkey; } void EVP_PKEY_CTX_set_app_data(EVP_PKEY_CTX *ctx, void *data) { ctx->app_data = data; } void *EVP_PKEY_CTX_get_app_data(EVP_PKEY_CTX *ctx) { return ctx->app_data; } void EVP_PKEY_meth_set_init(EVP_PKEY_METHOD *pmeth, int (*init)(EVP_PKEY_CTX *ctx)) { pmeth->init = init; } void EVP_PKEY_meth_set_copy(EVP_PKEY_METHOD *pmeth, int (*copy)(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src)) { pmeth->copy = copy; } void EVP_PKEY_meth_set_cleanup(EVP_PKEY_METHOD *pmeth, void (*cleanup)(EVP_PKEY_CTX *ctx)) { pmeth->cleanup = cleanup; } void EVP_PKEY_meth_set_paramgen(EVP_PKEY_METHOD *pmeth, int (*paramgen_init)(EVP_PKEY_CTX *ctx), int (*paramgen)(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey)) { pmeth->paramgen_init = paramgen_init; pmeth->paramgen = paramgen; } void EVP_PKEY_meth_set_keygen(EVP_PKEY_METHOD *pmeth, int (*keygen_init)(EVP_PKEY_CTX *ctx), int (*keygen)(EVP_PKEY_CTX *ctx, EVP_PKEY *pkey)) { pmeth->keygen_init = keygen_init; pmeth->keygen = keygen; } void EVP_PKEY_meth_set_sign(EVP_PKEY_METHOD *pmeth, int (*sign_init)(EVP_PKEY_CTX *ctx), int (*sign)(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen)) { pmeth->sign_init = sign_init; pmeth->sign = sign; } void EVP_PKEY_meth_set_verify(EVP_PKEY_METHOD *pmeth, int (*verify_init)(EVP_PKEY_CTX *ctx), int (*verify)(EVP_PKEY_CTX *ctx, const unsigned char *sig, size_t siglen, const unsigned char *tbs, size_t tbslen)) { pmeth->verify_init = verify_init; pmeth->verify = verify; } void EVP_PKEY_meth_set_verify_recover(EVP_PKEY_METHOD *pmeth, int (*verify_recover_init)(EVP_PKEY_CTX *ctx), int (*verify_recover)(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, const unsigned char *tbs, size_t tbslen)) { pmeth->verify_recover_init = verify_recover_init; pmeth->verify_recover = verify_recover; } void 
EVP_PKEY_meth_set_signctx(EVP_PKEY_METHOD *pmeth, int (*signctx_init)(EVP_PKEY_CTX *ctx, EVP_MD_CTX *mctx), int (*signctx)(EVP_PKEY_CTX *ctx, unsigned char *sig, size_t *siglen, EVP_MD_CTX *mctx)) { pmeth->signctx_init = signctx_init; pmeth->signctx = signctx; } void EVP_PKEY_meth_set_verifyctx(EVP_PKEY_METHOD *pmeth, int (*verifyctx_init)(EVP_PKEY_CTX *ctx, EVP_MD_CTX *mctx), int (*verifyctx)(EVP_PKEY_CTX *ctx, const unsigned char *sig,int siglen, EVP_MD_CTX *mctx)) { pmeth->verifyctx_init = verifyctx_init; pmeth->verifyctx = verifyctx; } void EVP_PKEY_meth_set_encrypt(EVP_PKEY_METHOD *pmeth, int (*encrypt_init)(EVP_PKEY_CTX *ctx), int (*encryptfn)(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen)) { pmeth->encrypt_init = encrypt_init; pmeth->encrypt = encryptfn; } void EVP_PKEY_meth_set_decrypt(EVP_PKEY_METHOD *pmeth, int (*decrypt_init)(EVP_PKEY_CTX *ctx), int (*decrypt)(EVP_PKEY_CTX *ctx, unsigned char *out, size_t *outlen, const unsigned char *in, size_t inlen)) { pmeth->decrypt_init = decrypt_init; pmeth->decrypt = decrypt; } void EVP_PKEY_meth_set_derive(EVP_PKEY_METHOD *pmeth, int (*derive_init)(EVP_PKEY_CTX *ctx), int (*derive)(EVP_PKEY_CTX *ctx, unsigned char *key, size_t *keylen)) { pmeth->derive_init = derive_init; pmeth->derive = derive; } void EVP_PKEY_meth_set_ctrl(EVP_PKEY_METHOD *pmeth, int (*ctrl)(EVP_PKEY_CTX *ctx, int type, int p1, void *p2), int (*ctrl_str)(EVP_PKEY_CTX *ctx, const char *type, const char *value)) { pmeth->ctrl = ctrl; pmeth->ctrl_str = ctrl_str; } #ifdef __cplusplus } #endif
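EVP_PKEY_meth_find above resolves a method in two steps: application-registered methods (app_pkey_methods) are consulted first, then the builtin standard_methods table, kept sorted by pkey_id, is binary-searched. A compact Python sketch of that two-level lookup follows; the numeric ids and names are illustrative placeholders, not the OpenSSL API.

from bisect import bisect_left

# Builtin methods, kept sorted by numeric id (stand-in for standard_methods + OBJ_bsearch).
BUILTIN_METHODS = [(6, "rsa"), (28, "dh"), (116, "dsa"), (408, "ec"), (855, "hmac")]
APP_METHODS = {}  # id -> method, stand-in for app_pkey_methods

def meth_find(pkey_id):
    """Application-registered methods shadow builtins; otherwise binary-search the sorted table."""
    if pkey_id in APP_METHODS:
        return APP_METHODS[pkey_id]
    ids = [i for i, _ in BUILTIN_METHODS]
    pos = bisect_left(ids, pkey_id)
    if pos < len(ids) and ids[pos] == pkey_id:
        return BUILTIN_METHODS[pos][1]
    return None

APP_METHODS[6] = "rsa-app-override"
print(meth_find(6))    # rsa-app-override (application method shadows the builtin)
print(meth_find(408))  # ec
print(meth_find(999))  # None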
6,913
2,151
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_UI_DESKTOP_IOS_PROMOTION_SMS_SERVICE_H_ #define CHROME_BROWSER_UI_DESKTOP_IOS_PROMOTION_SMS_SERVICE_H_ #include <stddef.h> #include <map> #include <memory> #include <string> #include <vector> #include "base/callback_forward.h" #include "base/macros.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "components/keyed_service/core/keyed_service.h" #include "url/gurl.h" namespace network { class SharedURLLoaderFactory; } class OAuth2TokenService; class SigninManagerBase; // Provides an API for querying a logged in users's verified phone number, // and sending a predetermined promotional SMS to that number. This class is // based heavily on WebHistoryService's implementation to query Google services. class SMSService : public KeyedService { public: class Request { public: virtual ~Request(); virtual bool IsPending() = 0; // Returns the response code received from the server, which will only be // valid if the request succeeded. virtual int GetResponseCode() = 0; // Returns the contents of the response body received from the server. virtual const std::string& GetResponseBody() = 0; virtual void SetPostData(const std::string& post_data) = 0; virtual void SetPostDataAndType(const std::string& post_data, const std::string& mime_type) = 0; // Tells the request to begin. virtual void Start() = 0; protected: Request(); }; typedef base::Callback< void(Request*, bool success, const std::string& number)> PhoneNumberCallback; typedef base::Callback<void(Request*, bool success)> CompletionCallback; SMSService(OAuth2TokenService* token_service, SigninManagerBase* signin_manager, scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory); ~SMSService() override; // Query the logged in user's verified phone number. virtual void QueryPhoneNumber(const PhoneNumberCallback& callback); // Send an SMS to the logged in user's verified phone number. The text of // the SMS is determined by |promo_id|. virtual void SendSMS(const std::string& promo_id, const SMSService::PhoneNumberCallback& callback); protected: void QueryPhoneNumberCompletionCallback( const SMSService::PhoneNumberCallback& callback, SMSService::Request* request, bool success); void SendSMSCallback(const SMSService::PhoneNumberCallback& callback, SMSService::Request* request, bool success); private: virtual Request* CreateRequest(const GURL& url, const CompletionCallback& callback); // Stores pointer to OAuth2TokenService and SigninManagerBase instance. They // must outlive the SMSService and can be null during // tests. OAuth2TokenService* token_service_; SigninManagerBase* signin_manager_; // Request context getter to use. scoped_refptr<network::SharedURLLoaderFactory> url_loader_factory_; // Pending expiration requests to be canceled if not complete by profile // shutdown. std::map<Request*, std::unique_ptr<Request>> pending_requests_; base::WeakPtrFactory<SMSService> weak_ptr_factory_; DISALLOW_COPY_AND_ASSIGN(SMSService); }; #endif // CHROME_BROWSER_UI_DESKTOP_IOS_PROMOTION_SMS_SERVICE_H_
1,234
1,091
/* * Copyright (c) 2015-2020, Oracle and/or its affiliates. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.tribuo.evaluation.metrics; import org.tribuo.Model; import org.tribuo.Output; import org.tribuo.Prediction; import org.tribuo.sequence.SequenceModel; import java.util.Collections; import java.util.List; /** * The context for a metric or set of metrics. At minimum it contains the model used to generate * the predictions, and the predictions themselves. */ public abstract class MetricContext<T extends Output<T>> { private final Model<T> model; private final SequenceModel<T> seqModel; private final List<Prediction<T>> predictions; protected MetricContext(Model<T> model, List<Prediction<T>> predictions) { this.model = model; this.seqModel = null; this.predictions = Collections.unmodifiableList(predictions); } protected MetricContext(SequenceModel<T> model, List<Prediction<T>> predictions) { this.model = null; this.seqModel = model; this.predictions = Collections.unmodifiableList(predictions); } /** * Gets the Model used by this context. * @return The model, or null if this MetricContext operates on a SequenceModel. */ public Model<T> getModel() { return model; } /** * Gets the SequenceModel used by this context. * @return The model, or null if this MetricContext operates on a Model. */ public SequenceModel<T> getSequenceModel() { return seqModel; } /** * Gets the predictions used by this context. * @return The predictions. */ public List<Prediction<T>> getPredictions() { return predictions; } }
732
1,004
<gh_stars>1000+ import asyncio import inspect import sys def is_double_callable(application): """ Tests to see if an application is a legacy-style (double-callable) application. """ # Look for a hint on the object first if getattr(application, "_asgi_single_callable", False): return False if getattr(application, "_asgi_double_callable", False): return True # Uninstantiated classes are double-callable if inspect.isclass(application): return True # Instantiated classes depend on their __call__ if hasattr(application, "__call__"): # We only check to see if its __call__ is a coroutine function - # if it's not, it still might be a coroutine function itself. if asyncio.iscoroutinefunction(application.__call__): return False # Non-classes we just check directly return not asyncio.iscoroutinefunction(application) def double_to_single_callable(application): """ Transforms a double-callable ASGI application into a single-callable one. """ async def new_application(scope, receive, send): instance = application(scope) return await instance(receive, send) return new_application def guarantee_single_callable(application): """ Takes either a single- or double-callable application and always returns it in single-callable style. Use this to add backwards compatibility for ASGI 2.0 applications to your server/test harness/etc. """ if is_double_callable(application): application = double_to_single_callable(application) return application if sys.version_info >= (3, 7): # these were introduced in 3.7 get_running_loop = asyncio.get_running_loop run_future = asyncio.run create_task = asyncio.create_task current_task = asyncio.current_task else: # marked as deprecated in 3.10, did not exist before 3.7 get_running_loop = asyncio.get_event_loop run_future = asyncio.ensure_future # does nothing, this is fine for <3.7 create_task = lambda task: task current_task = asyncio.Task.current_task
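As a usage sketch of the module above: an ASGI 2.0 "double-callable" application is a constructor that takes the scope and returns an awaitable taking receive and send; guarantee_single_callable wraps it so it can be driven like an ASGI 3.0 app. The sketch assumes the functions above are importable in scope; LegacyApp and the in-memory receive/send callables are invented for illustration.

import asyncio

class LegacyApp:
    """ASGI 2.0 style: instantiated with the scope, then awaited with (receive, send)."""

    def __init__(self, scope):
        self.scope = scope

    async def __call__(self, receive, send):
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"hello"})

async def main():
    app = guarantee_single_callable(LegacyApp)  # now callable as app(scope, receive, send)
    sent = []

    async def receive():
        return {"type": "http.request", "body": b"", "more_body": False}

    async def send(message):
        sent.append(message)

    await app({"type": "http", "method": "GET", "path": "/"}, receive, send)
    print(sent[0]["status"])  # 200

asyncio.run(main())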
722
462
<reponame>kfischer-okarin/mamba<gh_stars>100-1000 from mamba import description, before, it, context from expects import expect, be_none, be_a, be_true, be_false from doublex import Spy from mamba import reporter, runnable from spec.object_mother import * with description('Errors in hooks') as self: with before.each: self.reporter = Spy(reporter.Reporter) self.example_group = an_example_group() with context('when an error was raised in a before.all hook'): with before.each: self.example_group.hooks['before_all'].append(self._error) with it('marks example as failed'): self.example = an_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error).not_to(be_none) with context('when example also launches an error'): with context('when before.each also launches an error'): with it('keeps the error happened in first hook'): self.example_group.hooks['before_each'].append(self._other_error) self.example = a_failing_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error.exception).to(be_a(NotImplementedError)) with it('keeps the error happened in hook'): self.example = a_failing_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error.exception).to(be_a(NotImplementedError)) with context('when an error was raised in a before.each hook'): with before.each: self.example_group.hooks['before_each'].append(self._error) with it('marks example as failed with parent exception'): self.example = an_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error.exception).to(be_a(NotImplementedError)) with it('does not executes the example'): self.example = a_failing_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.was_run).to(be_false) with context('when an error was raised in an after.each hook'): with before.each: self.example_group.hooks['after_each'].append(self._error) with it('marks example as failed'): self.example = an_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error).not_to(be_none) with context('when an error happened in the example'): with it('still executes after_each hook'): self.example = a_failing_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(isinstance(self.example.error.exception, ValueError)).to(be_false) with context('when an error was raised in an after.all hook'): with before.each: self.example_group.hooks['after_all'].append(self._error) with it('marks example as failed'): self.example = an_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error).not_to(be_none) with context('when example also launches an error'): with context('when after.each also launches an error'): with it('keeps the error happened in last hook'): self.example_group.hooks['after_each'].append(self._other_error) self.example = a_failing_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) 
expect(self.example.error.exception).to(be_a(NotImplementedError)) with it('keeps the error happened in last hook'): self.example = a_failing_example() self.example_group.append(self.example) self.example_group.execute(self.reporter, runnable.ExecutionContext()) expect(self.example.error.exception).to(be_a(NotImplementedError)) def _error(self, *args): raise NotImplementedError() def _other_error(self, *args): raise IOError()
2,094
852
// // File name: CMSDarkPairProduction // // Author: <NAME> (<EMAIL>) // <NAME> (<EMAIL>) // Creation date: 01.22.2015 // ------------------------------------------------------------------- // #ifndef SimG4Core_CustomPhysics_CMSDarkPairProduction_h #define SimG4Core_CustomPhysics_CMSDarkPairProduction_h #include <CLHEP/Units/PhysicalConstants.h> #include "G4PairProductionRelModel.hh" #include "G4PhysicsTable.hh" #include "G4NistManager.hh" #include "G4VEmModel.hh" class CMSDarkPairProduction : public G4PairProductionRelModel { public: CMSDarkPairProduction(const G4ParticleDefinition* p = nullptr, G4double df = 1.0, const G4String& nam = "BetheHeitlerLPM"); ~CMSDarkPairProduction() override; G4double ComputeCrossSectionPerAtom(const G4ParticleDefinition*, G4double kinEnergy, G4double Z, G4double A = 0., G4double cut = 0., G4double emax = DBL_MAX) override; private: G4double dark_factor; }; #endif
580
322
#if !defined(DUK_REPLACEMENTS_H_INCLUDED) #define DUK_REPLACEMENTS_H_INCLUDED #if !defined(DUK_SINGLE_FILE) #if defined(DUK_USE_COMPUTED_INFINITY) DUK_INTERNAL_DECL double duk_computed_infinity; #endif #if defined(DUK_USE_COMPUTED_NAN) DUK_INTERNAL_DECL double duk_computed_nan; #endif #endif /* !DUK_SINGLE_FILE */ #if defined(DUK_USE_REPL_FPCLASSIFY) DUK_INTERNAL_DECL int duk_repl_fpclassify(double x); #endif #if defined(DUK_USE_REPL_SIGNBIT) DUK_INTERNAL_DECL int duk_repl_signbit(double x); #endif #if defined(DUK_USE_REPL_ISFINITE) DUK_INTERNAL_DECL int duk_repl_isfinite(double x); #endif #if defined(DUK_USE_REPL_ISNAN) DUK_INTERNAL_DECL int duk_repl_isnan(double x); #endif #if defined(DUK_USE_REPL_ISINF) DUK_INTERNAL_DECL int duk_repl_isinf(double x); #endif #endif /* DUK_REPLACEMENTS_H_INCLUDED */
382
1,039
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2020 <NAME> <<EMAIL>> * 2020 <NAME> <<EMAIL>> * 2020 <NAME> <<EMAIL>> */ #if !defined(SIMDE_X86_AVX512_MUL_H) #define SIMDE_X86_AVX512_MUL_H #include "types.h" #include "mov.h" HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_mul_ps (simde__m512 a, simde__m512 b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mul_ps(a, b); #else simde__m512_private r_, a_ = simde__m512_to_private(a), b_ = simde__m512_to_private(b); #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 * b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) { r_.m256[i] = simde_mm256_mul_ps(a_.m256[i], b_.m256[i]); } #endif return simde__m512_from_private(r_); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mul_ps #define _mm512_mul_ps(a, b) simde_mm512_mul_ps(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_mask_mul_ps(simde__m512 src, simde__mmask16 k, simde__m512 a, simde__m512 b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_mul_ps(src, k, a, b); #else return simde_mm512_mask_mov_ps(src, k, simde_mm512_mul_ps(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_mul_ps #define _mm512_mask_mul_ps(src, k, a, b) simde_mm512_mask_mul_ps(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512 simde_mm512_maskz_mul_ps(simde__mmask16 k, simde__m512 a, simde__m512 b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_mul_ps(k, a, b); #else return simde_mm512_maskz_mov_ps(k, simde_mm512_mul_ps(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_mul_ps #define _mm512_maskz_mul_ps(k, a, b) simde_mm512_maskz_mul_ps(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_mul_pd (simde__m512d a, simde__m512d b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mul_pd(a, b); #else simde__m512d_private r_, a_ = simde__m512d_to_private(a), b_ = simde__m512d_to_private(b); #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f64 = a_.f64 * b_.f64; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) { r_.m256d[i] = simde_mm256_mul_pd(a_.m256d[i], b_.m256d[i]); } #endif return simde__m512d_from_private(r_); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mul_pd #define _mm512_mul_pd(a, b) simde_mm512_mul_pd(a, 
b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_mask_mul_pd(simde__m512d src, simde__mmask8 k, simde__m512d a, simde__m512d b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_mul_pd(src, k, a, b); #else return simde_mm512_mask_mov_pd(src, k, simde_mm512_mul_pd(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_mul_pd #define _mm512_mask_mul_pd(src, k, a, b) simde_mm512_mask_mul_pd(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512d simde_mm512_maskz_mul_pd(simde__mmask8 k, simde__m512d a, simde__m512d b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_mul_pd(k, a, b); #else return simde_mm512_maskz_mov_pd(k, simde_mm512_mul_pd(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_mul_pd #define _mm512_maskz_mul_pd(k, a, b) simde_mm512_maskz_mul_pd(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mul_epi32 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mul_epi32(a, b); #else simde__m512i_private r_, a_ = simde__m512i_to_private(a), b_ = simde__m512i_to_private(b); #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_) simde__m512i_private x; __typeof__(r_.i64) ta, tb; /* Get even numbered 32-bit values */ x.i32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.i32, b_.i32, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); /* Cast to 64 bits */ SIMDE_CONVERT_VECTOR_(ta, x.m256i_private[0].i32); SIMDE_CONVERT_VECTOR_(tb, x.m256i_private[1].i32); r_.i64 = ta * tb; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) { r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[i << 1]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[i << 1]); } #endif return simde__m512i_from_private(r_); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mul_epi32 #define _mm512_mul_epi32(a, b) simde_mm512_mul_epi32(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask_mul_epi32(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_mul_epi32(src, k, a, b); #else return simde_mm512_mask_mov_epi64(src, k, simde_mm512_mul_epi32(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_mul_epi32 #define _mm512_mask_mul_epi32(src, k, a, b) simde_mm512_mask_mul_epi32(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_maskz_mul_epi32(simde__mmask8 k, simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_mul_epi32(k, a, b); #else return simde_mm512_maskz_mov_epi64(k, simde_mm512_mul_epi32(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_mul_epi32 #define _mm512_maskz_mul_epi32(k, a, b) simde_mm512_maskz_mul_epi32(k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mul_epu32 (simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mul_epu32(a, b); #else simde__m512i_private r_, a_ = simde__m512i_to_private(a), b_ = simde__m512i_to_private(b); #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_SHUFFLE_VECTOR_) simde__m512i_private x; __typeof__(r_.u64) ta, tb; x.u32 = SIMDE_SHUFFLE_VECTOR_(32, 64, a_.u32, b_.u32, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); SIMDE_CONVERT_VECTOR_(ta, x.m256i_private[0].u32); SIMDE_CONVERT_VECTOR_(tb, x.m256i_private[1].u32); r_.u64 = ta * tb; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < 
(sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) { r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i << 1]) * HEDLEY_STATIC_CAST(uint64_t, b_.u32[i << 1]); } #endif return simde__m512i_from_private(r_); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mul_epu32 #define _mm512_mul_epu32(a, b) simde_mm512_mul_epu32(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_mask_mul_epu32(simde__m512i src, simde__mmask8 k, simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_mask_mul_epu32(src, k, a, b); #else return simde_mm512_mask_mov_epi64(src, k, simde_mm512_mul_epu32(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_mask_mul_epu32 #define _mm512_mask_mul_epu32(src, k, a, b) simde_mm512_mask_mul_epu32(src, k, a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m512i simde_mm512_maskz_mul_epu32(simde__mmask8 k, simde__m512i a, simde__m512i b) { #if defined(SIMDE_X86_AVX512F_NATIVE) return _mm512_maskz_mul_epu32(k, a, b); #else return simde_mm512_maskz_mov_epi64(k, simde_mm512_mul_epu32(a, b)); #endif } #if defined(SIMDE_X86_AVX512F_ENABLE_NATIVE_ALIASES) #undef _mm512_maskz_mul_epu32 #define _mm512_maskz_mul_epu32(k, a, b) simde_mm512_maskz_mul_epu32(k, a, b) #endif SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP #endif /* !defined(SIMDE_X86_AVX512_MUL_H) */
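The SHUFFLE/CONVERT path above reproduces the lane semantics of _mm512_mul_epi32: only the even-indexed 32-bit lane of each 64-bit element pair is read, and each product is widened to 64 bits. A small Python sketch of that behaviour follows; it is pure illustration, not the SIMDe or Intel API.

def mul_epi32(a_i32, b_i32):
    """Multiply even-indexed signed 32-bit lanes, widening each product to 64 bits.

    a_i32 and b_i32 stand for the sixteen 32-bit lanes of one __m512i each;
    the result models the eight 64-bit lanes produced by the intrinsic.
    """
    assert len(a_i32) == len(b_i32) == 16
    return [a_i32[2 * i] * b_i32[2 * i] for i in range(8)]

# Lanes 0 and 2 feed results 0 and 1; the odd-indexed lanes are ignored.
a = [3, 999, -2, 999] + [0] * 12
b = [7, 999, 5, 999] + [0] * 12
print(mul_epi32(a, b)[:2])  # [21, -10]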
4,571
1,716
/* * Copyright 2013 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.jimfs; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.jimfs.Feature.FILE_CHANNEL; import static com.google.common.jimfs.Jimfs.URI_SCHEME; import static java.nio.file.StandardOpenOption.APPEND; import com.google.common.collect.ImmutableSet; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.nio.channels.AsynchronousFileChannel; import java.nio.channels.FileChannel; import java.nio.channels.SeekableByteChannel; import java.nio.file.AccessMode; import java.nio.file.CopyOption; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; import java.nio.file.FileSystem; import java.nio.file.FileSystems; import java.nio.file.LinkOption; import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.ProviderMismatchException; import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.DosFileAttributes; import java.nio.file.attribute.FileAttribute; import java.nio.file.attribute.FileAttributeView; import java.nio.file.spi.FileSystemProvider; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import org.checkerframework.checker.nullness.compatqual.NullableDecl; /** * {@link FileSystemProvider} implementation for Jimfs. This provider implements the actual file * system operations but does not handle creation, caching or lookup of file systems. See {@link * SystemJimfsFileSystemProvider}, which is the {@code META-INF/services/} entry for Jimfs, for * those operations. * * @author <NAME> */ final class JimfsFileSystemProvider extends FileSystemProvider { private static final JimfsFileSystemProvider INSTANCE = new JimfsFileSystemProvider(); static { // Register the URL stream handler implementation. try { Handler.register(); } catch (Throwable e) { // Couldn't set the system property needed to register the handler. Nothing we can do really. } } /** Returns the singleton instance of this provider. */ static JimfsFileSystemProvider instance() { return INSTANCE; } @Override public String getScheme() { return URI_SCHEME; } @Override public FileSystem newFileSystem(URI uri, Map<String, ?> env) throws IOException { throw new UnsupportedOperationException( "This method should not be called directly;" + "use an overload of Jimfs.newFileSystem() to create a FileSystem."); } @Override public FileSystem newFileSystem(Path path, Map<String, ?> env) throws IOException { JimfsPath checkedPath = checkPath(path); checkNotNull(env); URI pathUri = checkedPath.toUri(); URI jarUri = URI.create("jar:" + pathUri); try { // pass the new jar:jimfs://... 
URI to be handled by ZipFileSystemProvider return FileSystems.newFileSystem(jarUri, env); } catch (Exception e) { // if any exception occurred, assume the file wasn't a zip file and that we don't support // viewing it as a file system throw new UnsupportedOperationException(e); } } @Override public FileSystem getFileSystem(URI uri) { throw new UnsupportedOperationException( "This method should not be called directly; " + "use FileSystems.getFileSystem(URI) instead."); } /** Gets the file system for the given path. */ private static JimfsFileSystem getFileSystem(Path path) { return (JimfsFileSystem) checkPath(path).getFileSystem(); } @Override public Path getPath(URI uri) { throw new UnsupportedOperationException( "This method should not be called directly; " + "use Paths.get(URI) instead."); } private static JimfsPath checkPath(Path path) { if (path instanceof JimfsPath) { return (JimfsPath) path; } throw new ProviderMismatchException( "path " + path + " is not associated with a Jimfs file system"); } /** Returns the default file system view for the given path. */ private static FileSystemView getDefaultView(JimfsPath path) { return getFileSystem(path).getDefaultView(); } @Override public FileChannel newFileChannel( Path path, Set<? extends OpenOption> options, FileAttribute<?>... attrs) throws IOException { JimfsPath checkedPath = checkPath(path); if (!checkedPath.getJimfsFileSystem().getFileStore().supportsFeature(FILE_CHANNEL)) { throw new UnsupportedOperationException(); } return newJimfsFileChannel(checkedPath, options, attrs); } private JimfsFileChannel newJimfsFileChannel( JimfsPath path, Set<? extends OpenOption> options, FileAttribute<?>... attrs) throws IOException { ImmutableSet<OpenOption> opts = Options.getOptionsForChannel(options); FileSystemView view = getDefaultView(path); RegularFile file = view.getOrCreateRegularFile(path, opts, attrs); return new JimfsFileChannel(file, opts, view.state()); } @Override public SeekableByteChannel newByteChannel( Path path, Set<? extends OpenOption> options, FileAttribute<?>... attrs) throws IOException { JimfsPath checkedPath = checkPath(path); JimfsFileChannel channel = newJimfsFileChannel(checkedPath, options, attrs); return checkedPath.getJimfsFileSystem().getFileStore().supportsFeature(FILE_CHANNEL) ? channel : new DowngradedSeekableByteChannel(channel); } @Override public AsynchronousFileChannel newAsynchronousFileChannel( Path path, Set<? extends OpenOption> options, @NullableDecl ExecutorService executor, FileAttribute<?>... attrs) throws IOException { // call newFileChannel and cast so that FileChannel support is checked there JimfsFileChannel channel = (JimfsFileChannel) newFileChannel(path, options, attrs); if (executor == null) { JimfsFileSystem fileSystem = (JimfsFileSystem) path.getFileSystem(); executor = fileSystem.getDefaultThreadPool(); } return channel.asAsynchronousFileChannel(executor); } @Override public InputStream newInputStream(Path path, OpenOption... options) throws IOException { JimfsPath checkedPath = checkPath(path); ImmutableSet<OpenOption> opts = Options.getOptionsForInputStream(options); FileSystemView view = getDefaultView(checkedPath); RegularFile file = view.getOrCreateRegularFile(checkedPath, opts, NO_ATTRS); return new JimfsInputStream(file, view.state()); } private static final FileAttribute<?>[] NO_ATTRS = {}; @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { JimfsPath checkedPath = checkPath(path); ImmutableSet<OpenOption> opts = Options.getOptionsForOutputStream(options); FileSystemView view = getDefaultView(checkedPath); RegularFile file = view.getOrCreateRegularFile(checkedPath, opts, NO_ATTRS); return new JimfsOutputStream(file, opts.contains(APPEND), view.state()); } @Override public DirectoryStream<Path> newDirectoryStream( Path dir, DirectoryStream.Filter<? super Path> filter) throws IOException { JimfsPath checkedPath = checkPath(dir); return getDefaultView(checkedPath) .newDirectoryStream(checkedPath, filter, Options.FOLLOW_LINKS, checkedPath); } @Override public void createDirectory(Path dir, FileAttribute<?>... attrs) throws IOException { JimfsPath checkedPath = checkPath(dir); FileSystemView view = getDefaultView(checkedPath); view.createDirectory(checkedPath, attrs); } @Override public void createLink(Path link, Path existing) throws IOException { JimfsPath linkPath = checkPath(link); JimfsPath existingPath = checkPath(existing); checkArgument( linkPath.getFileSystem().equals(existingPath.getFileSystem()), "link and existing paths must belong to the same file system instance"); FileSystemView view = getDefaultView(linkPath); view.link(linkPath, getDefaultView(existingPath), existingPath); } @Override public void createSymbolicLink(Path link, Path target, FileAttribute<?>... attrs) throws IOException { JimfsPath linkPath = checkPath(link); JimfsPath targetPath = checkPath(target); checkArgument( linkPath.getFileSystem().equals(targetPath.getFileSystem()), "link and target paths must belong to the same file system instance"); FileSystemView view = getDefaultView(linkPath); view.createSymbolicLink(linkPath, targetPath, attrs); } @Override public Path readSymbolicLink(Path link) throws IOException { JimfsPath checkedPath = checkPath(link); return getDefaultView(checkedPath).readSymbolicLink(checkedPath); } @Override public void delete(Path path) throws IOException { JimfsPath checkedPath = checkPath(path); FileSystemView view = getDefaultView(checkedPath); view.deleteFile(checkedPath, FileSystemView.DeleteMode.ANY); } @Override public void copy(Path source, Path target, CopyOption... options) throws IOException { copy(source, target, Options.getCopyOptions(options), false); } private void copy(Path source, Path target, ImmutableSet<CopyOption> options, boolean move) throws IOException { JimfsPath sourcePath = checkPath(source); JimfsPath targetPath = checkPath(target); FileSystemView sourceView = getDefaultView(sourcePath); FileSystemView targetView = getDefaultView(targetPath); sourceView.copy(sourcePath, targetView, targetPath, options, move); } @Override public void move(Path source, Path target, CopyOption... options) throws IOException { copy(source, target, Options.getMoveOptions(options), true); } @Override public boolean isSameFile(Path path, Path path2) throws IOException { if (path.equals(path2)) { return true; } if (!(path instanceof JimfsPath && path2 instanceof JimfsPath)) { return false; } JimfsPath checkedPath = (JimfsPath) path; JimfsPath checkedPath2 = (JimfsPath) path2; FileSystemView view = getDefaultView(checkedPath); FileSystemView view2 = getDefaultView(checkedPath2); return view.isSameFile(checkedPath, view2, checkedPath2); } @Override public boolean isHidden(Path path) throws IOException { // TODO(cgdecker): This should probably be configurable, but this seems fine for now /* * If the DOS view is supported, use the Windows isHidden method (check the dos:hidden * attribute). 
Otherwise, use the Unix isHidden method (just check if the file name starts with * "."). */ JimfsPath checkedPath = checkPath(path); FileSystemView view = getDefaultView(checkedPath); if (getFileStore(path).supportsFileAttributeView("dos")) { return view.readAttributes(checkedPath, DosFileAttributes.class, Options.NOFOLLOW_LINKS) .isHidden(); } return path.getNameCount() > 0 && path.getFileName().toString().startsWith("."); } @Override public FileStore getFileStore(Path path) throws IOException { return getFileSystem(path).getFileStore(); } @Override public void checkAccess(Path path, AccessMode... modes) throws IOException { JimfsPath checkedPath = checkPath(path); getDefaultView(checkedPath).checkAccess(checkedPath); } @NullableDecl @Override public <V extends FileAttributeView> V getFileAttributeView( Path path, Class<V> type, LinkOption... options) { JimfsPath checkedPath = checkPath(path); return getDefaultView(checkedPath) .getFileAttributeView(checkedPath, type, Options.getLinkOptions(options)); } @Override public <A extends BasicFileAttributes> A readAttributes( Path path, Class<A> type, LinkOption... options) throws IOException { JimfsPath checkedPath = checkPath(path); return getDefaultView(checkedPath) .readAttributes(checkedPath, type, Options.getLinkOptions(options)); } @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... options) throws IOException { JimfsPath checkedPath = checkPath(path); return getDefaultView(checkedPath) .readAttributes(checkedPath, attributes, Options.getLinkOptions(options)); } @Override public void setAttribute(Path path, String attribute, Object value, LinkOption... options) throws IOException { JimfsPath checkedPath = checkPath(path); getDefaultView(checkedPath) .setAttribute(checkedPath, attribute, value, Options.getLinkOptions(options)); } }
4,151
4,216
/** * @file silhouette_score.hpp * @author <NAME> * * The Silhouette metric. * * mlpack is free software; you may redistribute it and/or modify it under the * terms of the 3-clause BSD license. You should have received a copy of the * 3-clause BSD license along with mlpack. If not, see * http://www.opensource.org/licenses/BSD-3-Clause for more information. */ #ifndef MLPACK_CORE_CV_METRICS_SILHOUETTE_SCORE_HPP #define MLPACK_CORE_CV_METRICS_SILHOUETTE_SCORE_HPP #include <mlpack/core.hpp> namespace mlpack { namespace cv { /** * The Silhouette Score is a metric of performance for clustering * that represents the quality of clusters made as a result. * It provides an indication of goodness of fit and therefore a measure of how * well unseen samples are likely to be predicted by the model, considering * the inter-cluster and intra-cluster dissimilarities. * The Silhouette Score is dependent on the metric used to calculate the * dissimilarities. The best possible score is @f$ s(i) = 1.0 @f$. * Smaller values of Silhouette Score indicate poor clustering. * Negative values would occur when a wrong label was put on the element. * Values near zero indicate overlapping clusters. * For an element i, @f$ a(i) @f$ is the within-cluster average dissimilarity * and @f$ b(i) @f$ is the minimum average dissimilarity from other clusters. * The Silhouette Score @f$ s(i) @f$ of a sample is calculated by * @f{eqnarray*}{ * s(i) &=& \frac{b(i) - a(i)}{max\{b(i), a(i)\}} * @f} * * The Overall Silhouette Score is the mean of individual silhouette scores. */ class SilhouetteScore { public: /** * Find the overall silhouette score. * * @param X Column-major data used for clustering. * @param labels Labels assigned to data by clustering. * @param metric Metric to be used to calculate dissimilarity. * @return (double) silhouette score. */ template<typename DataType, typename Metric> static double Overall(const DataType& X, const arma::Row<size_t>& labels, const Metric& metric); /** * Find the individual silhouette scores for precomputed dissimilarities. * * @param distances Square matrix containing distances between data points. * @param labels Labels assigned to data by clustering. * @return (arma::rowvec) element-wise silhouette score. */ template<typename DataType> static arma::rowvec SamplesScore(const DataType& distances, const arma::Row<size_t>& labels); /** * Find silhouette score of all individual elements. * (Distance not precomputed). * * @param X Column-major data used for clustering. * @param labels Labels assigned to data by clustering. * @param metric Metric to be used to calculate dissimilarity. * @return (arma::rowvec) element-wise silhouette score. */ template<typename DataType, typename Metric> static arma::rowvec SamplesScore(const DataType& X, const arma::Row<size_t>& labels, const Metric& metric); /** * Find mean distance of element from a given cluster. * * @param distances colvec containing distances from other elements. * @param labels Labels assigned to data by clustering. * @param label label of the target cluster. * @param sameCluster true if calculating mean distance from same cluster. * @return (double) distance from the cluster. */ static double MeanDistanceFromCluster(const arma::colvec& distances, const arma::Row<size_t>& labels, const size_t& label, const bool& sameCluster = false); /** * Information for hyper-parameter tuning code. It indicates that we want * to maximize the metric.
*/ static const bool NeedsMinimization = false; }; } // namespace cv } // namespace mlpack // Include implementation. #include "silhouette_score_impl.hpp" #endif
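To make the formula in the header concrete: for each sample, a(i) is its mean dissimilarity to the rest of its own cluster and b(i) is the smallest mean dissimilarity to any other cluster. Below is a tiny Python sketch of the per-sample computation with Euclidean distance; it is illustrative only, not the mlpack API, and it assumes every cluster has at least two members.

from math import dist

def silhouette_samples(points, labels):
    """s(i) = (b(i) - a(i)) / max(a(i), b(i)) with Euclidean distance."""
    n = len(points)
    clusters = set(labels)
    scores = []
    for i in range(n):
        own = [dist(points[i], points[j]) for j in range(n) if j != i and labels[j] == labels[i]]
        a = sum(own) / len(own)  # mean intra-cluster dissimilarity
        b = min(                 # mean dissimilarity to the closest other cluster
            sum(dist(points[i], points[j]) for j in range(n) if labels[j] == c)
            / sum(1 for j in range(n) if labels[j] == c)
            for c in clusters if c != labels[i]
        )
        scores.append((b - a) / max(a, b))
    return scores

pts = [(0.0, 0.0), (0.0, 1.0), (5.0, 5.0), (5.0, 6.0)]
print([round(s, 3) for s in silhouette_samples(pts, [0, 0, 1, 1])])  # well separated => close to 1

The overall score computed by Overall() is then just the mean of these per-sample values, as the header states.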
1,425
575
<gh_stars>100-1000 // // DemoAlertView.h // DJISdkDemo // // Copyright © 2015 DJI. All rights reserved. // #import <Foundation/Foundation.h> NS_ASSUME_NONNULL_BEGIN extern void ShowResult(NSString * format, ...); typedef void (^DemoAlertViewActionBlock)(NSUInteger buttonIndex); typedef void (^DemoAlertInputViewActionBlock)(NSArray<UITextField*>* _Nullable textFields, NSUInteger buttonIndex); @interface DemoAlertView : NSObject +(instancetype _Nullable) showAlertViewWithMessage:(NSString* _Nullable)message titles:(NSArray<NSString*> * _Nullable)titles action:(DemoAlertViewActionBlock _Nullable)actionBlock; +(instancetype _Nullable) showAlertViewWithMessage:(NSString* _Nullable)message titles:(NSArray<NSString*> * _Nullable)titles textFields:(NSArray<NSString*>* _Nullable)textFields action:(DemoAlertInputViewActionBlock _Nullable)actionBlock; +(instancetype _Nullable) showAlertViewWithMessage:(NSString* _Nullable)message titles:(NSArray<NSString*> * _Nullable)titles action:(DemoAlertViewActionBlock _Nullable)actionBlock presentedViewController:(UIViewController *)viewController; -(void) dismissAlertView; -(void) unpdateMessage:(nullable NSString *)message; @end NS_ASSUME_NONNULL_END
394
1,962
<gh_stars>1000+ package com.bolingcavalry.service.impl; import com.bolingcavalry.service.ConsumeModeService; import com.bolingcavalry.service.OrderEvent; import com.lmax.disruptor.EventTranslatorOneArg; import lombok.extern.slf4j.Slf4j; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.context.junit4.SpringRunner; import java.util.concurrent.CountDownLatch; import static org.junit.Assert.assertEquals; @RunWith(SpringRunner.class) @SpringBootTest @Slf4j public class ConsumeModeServiceTest { @Autowired @Qualifier("independentModeService") ConsumeModeService independentModeService; @Autowired @Qualifier("shareModeService") ConsumeModeService shareModeService; @Autowired @Qualifier("independentAndShareModeService") ConsumeModeService independentAndShareModeService; @Autowired @Qualifier("multiProducerService") ConsumeModeService multiProducerService; @Autowired @Qualifier("scene5") ConsumeModeService scene5; @Autowired @Qualifier("scene6") ConsumeModeService scene6; @Autowired @Qualifier("scene7") ConsumeModeService scene7; @Autowired @Qualifier("scene8") ConsumeModeService scene8; @Autowired @Qualifier("scene9") ConsumeModeService scene9; @Autowired @Qualifier("scene10") ConsumeModeService scene10; @Autowired @Qualifier("translatorPublishService") ConsumeModeService translatorPublishService; @Autowired @Qualifier("lambdaService") ConsumeModeService lambdaService; /** * Number of messages produced during the test */ private static final int EVENT_COUNT = 100; private void testConsumeModeService(ConsumeModeService service, int eventCount, int expectEventCount) throws InterruptedException { CountDownLatch countDownLatch = new CountDownLatch(1); // Tell the service to call countDownLatch.countDown once expectEventCount messages have been consumed service.setCountDown(countDownLatch, expectEventCount); for(int i=0;i<eventCount;i++) { log.info("publish {}", i); service.publish(String.valueOf(i)); } // The current thread now waits; the service.setCountDown call above has already told the service // to call countDownLatch.countDown once expectEventCount messages have been consumed. // Make absolutely sure to call await, not wait!
countDownLatch.await(); // The total number of consumed events should equal the number of published events assertEquals(expectEventCount, service.eventCount()); } @Test public void testIndependentModeService() throws InterruptedException { log.info("start testIndependentModeService"); testConsumeModeService(independentModeService, EVENT_COUNT, EVENT_COUNT * ConsumeModeService.INDEPENDENT_CONSUMER_NUM); } @Test public void testShareModeService() throws InterruptedException { log.info("start testShareModeService"); testConsumeModeService(shareModeService, EVENT_COUNT, EVENT_COUNT); } @Test public void independentAndShareModeService() throws InterruptedException { log.info("start independentAndShareModeService"); testConsumeModeService(independentAndShareModeService, EVENT_COUNT, EVENT_COUNT * ConsumeModeService.INDEPENDENT_CONSUMER_NUM); } @Test public void testMultiProducerService() throws InterruptedException { log.info("start testMultiProducerService"); CountDownLatch countDownLatch = new CountDownLatch(1); // Two producers, each producing 100 events, so 200 events are produced in total // Two independent consumers, each consuming 200 events, so 400 events are consumed in total int expectEventCount = EVENT_COUNT*4; // Tell the service to call countDownLatch.countDown once 400 messages have been consumed multiProducerService.setCountDown(countDownLatch, expectEventCount); // Start a thread that produces events with the first producer new Thread(() -> { for(int i=0;i<EVENT_COUNT;i++) { log.info("publish {}", i); multiProducerService.publish(String.valueOf(i)); } }).start(); // Start another thread that produces events with the second producer new Thread(() -> { for(int i=0;i<EVENT_COUNT;i++) { log.info("publishWithProducer2 {}", i); try { multiProducerService.publishWithProducer2(String.valueOf(i)); } catch (Exception e) { e.printStackTrace(); } } }).start(); // The current thread now waits; the service.setCountDown call above has already told the service // to call countDownLatch.countDown once expectEventCount messages have been consumed. // Make absolutely sure to call await, not wait! countDownLatch.await(); // The total number of consumed events should equal the number of published events assertEquals(expectEventCount, multiProducerService.eventCount()); } @Test public void testScene5 () throws InterruptedException { log.info("start testScene5"); testConsumeModeService(scene5, EVENT_COUNT, // Three independent consumers, so 300 events are consumed in total EVENT_COUNT * 3); } @Test public void testScene6 () throws InterruptedException { log.info("start testScene6"); testConsumeModeService(scene6, EVENT_COUNT, // Four independent consumers, so 400 events are consumed in total EVENT_COUNT * 4); } @Test public void testScene7 () throws InterruptedException { log.info("start testScene7"); testConsumeModeService(scene7, EVENT_COUNT, // Five independent consumers, so 500 events are consumed in total EVENT_COUNT * 5); } @Test public void testScene8 () throws InterruptedException { log.info("start testScene8"); testConsumeModeService(scene8, EVENT_COUNT, // C1 and C2 consume as a group, C3 and C4 consume as a group, and C5, although alone, is also in group-consumption mode, // i.e. three consumer groups in total, so 300 events are consumed in total EVENT_COUNT * 3); } @Test public void testScene9 () throws InterruptedException { log.info("start testScene9"); testConsumeModeService(scene9, EVENT_COUNT, // C1 and C2 consume as a group (100 events), // C3 and C4 consume independently (200 events), // C5 consumes independently (100 events), // so 400 events are consumed in total EVENT_COUNT * 4); } @Test public void testScene10 () throws InterruptedException { log.info("start testScene10"); testConsumeModeService(scene10, EVENT_COUNT, // C1 and C2 consume independently (200 events), // C3 and C4 consume independently (100 events), // C5 consumes independently (100 events), // so 400 events are consumed in total EVENT_COUNT * 4); } @Test public void testTranslatorPublishService() throws InterruptedException { log.info("start testTranslatorPublishService"); testConsumeModeService(translatorPublishService, EVENT_COUNT, EVENT_COUNT * ConsumeModeService.INDEPENDENT_CONSUMER_NUM); } @Test public void testLambdaService() throws InterruptedException { log.info("start testLambdaService"); CountDownLatch countDownLatch = new CountDownLatch(1);
// Tell the service to call countDownLatch.countDown once expectEventCount messages have been consumed lambdaService.setCountDown(countDownLatch, EVENT_COUNT); for(int i=0;i<EVENT_COUNT;i++) { log.info("publish {}", i); final String content = String.valueOf(i); lambdaService.publistEvent((event, sequence, value) -> event.setValue(value), content); } // The current thread now waits; the service.setCountDown call above has already told the service // to call countDownLatch.countDown once expectEventCount messages have been consumed. // Make absolutely sure to call await, not wait! countDownLatch.await(); // The total number of consumed events should equal the number of published events assertEquals(EVENT_COUNT, lambdaService.eventCount()); } }
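Every test above follows the same synchronization pattern: tell the service how many events the consumers should see, publish, then block on a latch until that count is reached before asserting. A rough Python sketch of that latch pattern follows; it is illustrative only and unrelated to the Disruptor API.

import threading

class CountingLatch:
    """Releases waiters once count_down() has been called `expected` times."""

    def __init__(self, expected):
        self.expected = expected
        self.count = 0
        self.cond = threading.Condition()

    def count_down(self):
        with self.cond:
            self.count += 1
            if self.count >= self.expected:
                self.cond.notify_all()

    def await_(self):
        with self.cond:
            self.cond.wait_for(lambda: self.count >= self.expected)

latch = CountingLatch(expected=100)
consumed = []

def consume(event):
    consumed.append(event)  # the consumer records the event ...
    latch.count_down()      # ... and ticks the latch

for i in range(100):        # "publish" 100 events, each handled on its own thread
    threading.Thread(target=consume, args=(i,)).start()

latch.await_()              # like countDownLatch.await() in the tests above
print(len(consumed))        # 100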
4,364
482
package io.cattle.platform.iaas.api.auth.identity; import io.cattle.platform.api.auth.Identity; import io.cattle.platform.api.auth.Policy; import io.cattle.platform.api.pubsub.manager.SubscribeManager; import io.cattle.platform.archaius.util.ArchaiusUtil; import io.cattle.platform.core.dao.AccountDao; import io.cattle.platform.eventing.EventService; import io.cattle.platform.eventing.model.EventVO; import io.cattle.platform.iaas.api.auth.AbstractTokenUtil; import io.cattle.platform.iaas.api.auth.SecurityConstants; import io.cattle.platform.iaas.api.auth.dao.AuthDao; import io.cattle.platform.iaas.api.auth.dao.AuthTokenDao; import io.cattle.platform.iaas.api.auth.integration.external.ExternalServiceAuthProvider; import io.cattle.platform.iaas.api.auth.integration.interfaces.TokenCreator; import io.cattle.platform.iaas.api.auth.integration.internal.rancher.TokenAuthLookup; import io.cattle.platform.iaas.event.IaasEvents; import io.cattle.platform.object.ObjectManager; import io.cattle.platform.token.TokenService; import io.github.ibuildthecloud.gdapi.context.ApiContext; import io.github.ibuildthecloud.gdapi.exception.ClientVisibleException; import io.github.ibuildthecloud.gdapi.factory.SchemaFactory; import io.github.ibuildthecloud.gdapi.model.ListOptions; import io.github.ibuildthecloud.gdapi.request.ApiRequest; import io.github.ibuildthecloud.gdapi.request.resource.impl.AbstractNoOpResourceManager; import io.github.ibuildthecloud.gdapi.util.ResponseCodes; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import javax.inject.Inject; import javax.servlet.http.HttpServletResponse; import org.apache.commons.lang3.StringUtils; import com.netflix.config.DynamicBooleanProperty; public class TokenResourceManager extends AbstractNoOpResourceManager { @Inject ObjectManager objectManager; @Inject AuthTokenDao authTokenDao; @Inject IdentityManager identityManager; @Inject ExternalServiceAuthProvider externalAuthProvider; @Inject TokenService tokenService; @Inject TokenAuthLookup tokenAuthLookup; @Inject AuthDao authDao; @Inject AccountDao accountDao; @Inject EventService eventService; private List<TokenCreator> tokenCreators; private static final DynamicBooleanProperty RESTRICT_CONCURRENT_SESSIONS = ArchaiusUtil.getBoolean("api.auth.restrict.concurrent.sessions"); @Override public Class<?>[] getTypeClasses() { return new Class<?>[]{Token.class}; } @Override protected Object createInternal(String type, ApiRequest request) { if (!StringUtils.equals(AbstractTokenUtil.TOKEN, request.getType())) { return null; } return createToken(request); } private Token createToken(ApiRequest request) { Token token = null; if (SecurityConstants.AUTH_PROVIDER.get() == null || SecurityConstants.NO_PROVIDER.equalsIgnoreCase(SecurityConstants.AUTH_PROVIDER.get())) { throw new ClientVisibleException(ResponseCodes.INTERNAL_SERVER_ERROR, "NoAuthProvider", "No Auth provider is configured.", null); } if (SecurityConstants.INTERNAL_AUTH_PROVIDERS.contains(SecurityConstants.AUTH_PROVIDER.get())) { for (TokenCreator tokenCreator : tokenCreators) { if (tokenCreator.isConfigured() && tokenCreator.providerType().equalsIgnoreCase(SecurityConstants.AUTH_PROVIDER.get())) { if (!SecurityConstants.SECURITY.get()) { tokenCreator.reset(); } token = tokenCreator.getToken(request); break; } } } else { //call external service token = externalAuthProvider.getToken(request); } if (token == null){ throw new ClientVisibleException(ResponseCodes.BAD_REQUEST, "codeInvalid", "Code provided is 
invalid.", null); } Identity[] identities = token.getIdentities(); List<Identity> transFormedIdentities = new ArrayList<>(); for (Identity identity : identities) { transFormedIdentities.add(identityManager.untransform(identity, true)); } token.setIdentities(transFormedIdentities); token.setUserIdentity(identityManager.untransform(token.getUserIdentity(), true)); long authenticatedAsAccountId = token.getAuthenticatedAsAccountId(); long tokenAccountId = ((Policy) ApiContext.getContext().getPolicy()).getAccountId(); if (RESTRICT_CONCURRENT_SESSIONS.get()) { authTokenDao.deletePreviousTokens(authenticatedAsAccountId, tokenAccountId); String event = IaasEvents.appendAccount(SubscribeManager.EVENT_DISCONNECT, authenticatedAsAccountId); eventService.publish(EventVO.newEvent(event)); } token.setJwt(authTokenDao.createToken(token.getJwt(), token.getAuthProvider(), ((Policy) ApiContext.getContext().getPolicy()).getAccountId(), authenticatedAsAccountId).getKey()); return token; } @Override protected Object listInternal(SchemaFactory schemaFactory, String type, Map<Object, Object> criteria, ListOptions options) { Token token = listToken(); return Collections.singletonList(token); } protected Token listToken() { Token token = new Token(); if (SecurityConstants.AUTH_PROVIDER.get() == null || SecurityConstants.NO_PROVIDER.equalsIgnoreCase(SecurityConstants.AUTH_PROVIDER.get())) { return token; } if (SecurityConstants.INTERNAL_AUTH_PROVIDERS.contains(SecurityConstants.AUTH_PROVIDER.get())) { for (TokenCreator tokenCreator : tokenCreators) { if (tokenCreator.isConfigured() && tokenCreator.providerType().equalsIgnoreCase(SecurityConstants.AUTH_PROVIDER.get())) { token = tokenCreator.getCurrentToken(); break; } } return token; } else { //get redirect Url from external service if (externalAuthProvider.isConfigured()) { return externalAuthProvider.readCurrentToken(); } } return token; } public List<TokenCreator> getTokenCreators() { return tokenCreators; } @Inject public void setTokenCreators(List<TokenCreator> tokenCreators) { this.tokenCreators = tokenCreators; } @Override protected Object deleteInternal(String type, String id, Object obj, ApiRequest request) { if (!StringUtils.equals(AbstractTokenUtil.TOKEN, request.getType())) { return null; } return deleteToken(obj, request); } protected Object deleteToken(Object obj, ApiRequest request) { Token token = new Token(); String jwt = ""; token = listToken(); jwt = token.getJwt(); if(StringUtils.isBlank(jwt)) { throw new ClientVisibleException(ResponseCodes.INTERNAL_SERVER_ERROR, "JWTNotProvided", "Request does not contain JWT cookie", null); } request.setResponseCode(ResponseCodes.NO_CONTENT); HttpServletResponse response = request.getServletContext().getResponse(); String cookieString="token=;Path=/;Expires=Thu, 01 Jan 1970 00:00:00 GMT;"; response.addHeader("Set-Cookie", cookieString); request.getServletContext().setResponse(response); if(authTokenDao.deleteToken(jwt)) { return obj; } return null; } }
3,066
345
<gh_stars>100-1000 # Copyright (c) 2017-2020, <NAME> <<EMAIL>> # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause """MPSSE command debug tracer.""" #pylint: disable-msg=missing-docstring #pylint: disable-msg=too-many-instance-attributes from binascii import hexlify from collections import deque from inspect import currentframe from logging import getLogger from string import ascii_uppercase from struct import unpack as sunpack from typing import Union from .ftdi import Ftdi class FtdiMpsseTracer: """FTDI MPSSE protocol decoder.""" MPSSE_ENGINES = { 0x0200: 0, 0x0400: 0, 0x0500: 0, 0x0600: 0, 0x0700: 2, 0x0800: 2, 0x0900: 1, 0x1000: 0} """Count of MPSSE engines.""" def __init__(self, version): count = self.MPSSE_ENGINES[version] self._engines = [None] * count def send(self, iface: int, buf: Union[bytes, bytearray]) -> None: self._get_engine(iface).send(buf) def receive(self, iface: int, buf: Union[bytes, bytearray]) -> None: self._get_engine(iface).receive(buf) def _get_engine(self, iface: int): iface -= 1 try: self._engines[iface] except IndexError as exc: raise ValueError('No MPSSE engine available on interface %d' % iface) from exc if not self._engines[iface]: self._engines[iface] = FtdiMpsseEngine(iface) return self._engines[iface] class FtdiMpsseEngine: """FTDI MPSSE virtual engine Far from being complete for now """ COMMAND_PREFIX = \ 'GET SET READ WRITE RW ENABLE DISABLE CLK LOOPBACK SEND DRIVE' def build_commands(prefix: str): commands = {} for cmd in dir(Ftdi): if cmd[0] not in ascii_uppercase: continue value = getattr(Ftdi, cmd) if not isinstance(value, int): continue family = cmd.split('_')[0] if family not in prefix.split(): continue commands[value] = cmd return commands COMMANDS = build_commands(COMMAND_PREFIX) ST_IDLE = range(1) def __init__(self, iface: int): self.log = getLogger('pyftdi.mpsse.tracer') self._if = iface self._trace_tx = bytearray() self._trace_rx = bytearray() self._state = self.ST_IDLE self._clkdiv5 = False self._cmd_decoded = True self._resp_decoded = True self._last_codes = deque() self._expect_resp = deque() # positive: byte, negative: bit count def send(self, buf: Union[bytes, bytearray]) -> None: self._trace_tx.extend(buf) while self._trace_tx: try: code = self._trace_tx[0] cmd = self.COMMANDS[code] if self._cmd_decoded: self.log.debug('[%d]:[Command: %02X: %s]', self._if, code, cmd) cmd_decoder = getattr(self, '_cmd_%s' % cmd.lower()) rdepth = len(self._expect_resp) try: self._cmd_decoded = cmd_decoder() except AttributeError as exc: raise ValueError(str(exc)) from exc if len(self._expect_resp) > rdepth: self._last_codes.append(code) if self._cmd_decoded: continue # not enough data in buffer to decode a whole command return except IndexError: self.log.warning('[%d]:Empty buffer on %02X: %s', self._if, code, cmd) except KeyError: self.log.warning('[%d]:Unknown command code: %02X', self._if, code) except AttributeError as exc: self.log.warning('[%d]:Decoder for command %s [%02X] is not ' 'implemented', self._if, cmd, code) except ValueError as exc: self.log.warning('[%d]:Decoder for command %s [%02X] failed: ' '%s', self._if, cmd, code, exc) # on error, flush all buffers self.log.warning('Flush TX/RX buffers') self._trace_tx = bytearray() self._trace_rx = bytearray() self._last_codes.clear() def receive(self, buf: Union[bytes, bytearray]) -> None: self.log.info(' .. 
%s', hexlify(buf).decode()) self._trace_rx.extend(buf) while self._trace_rx: code = None try: code = self._last_codes.popleft() cmd = self.COMMANDS[code] resp_decoder = getattr(self, '_resp_%s' % cmd.lower()) self._resp_decoded = resp_decoder() if self._resp_decoded: continue # not enough data in buffer to decode a whole response return except IndexError: self.log.warning('[%d]:Empty buffer', self._if) except KeyError: self.log.warning('[%d]:Unknown command code: %02X', self._if, code) except AttributeError: self.log.warning('[%d]:Decoder for response %s [%02X] is not ' 'implemented', self._if, cmd, code) # on error, flush RX buffer self.log.warning('[%d]:Flush RX buffer', self._if) self._trace_rx = bytearray() self._last_codes.clear() def _cmd_enable_clk_div5(self): self.log.info(' [%d]:Enable clock divisor /5', self._if) self._clkdiv5 = True self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_disable_clk_div5(self): self.log.info(' [%d]:Disable clock divisor /5', self._if) self._clkdiv5 = False self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_set_tck_divisor(self): if len(self._trace_tx) < 3: return False value, = sunpack('<H', self._trace_tx[1:3]) base = 12E6 if self._clkdiv5 else 60E6 freq = base / ((1 + value) * 2) self.log.info(' [%d]:Set frequency %.3fMHZ', self._if, freq/1E6) self._trace_tx[:] = self._trace_tx[3:] return True def _cmd_loopback_end(self): self.log.info(' [%d]:Disable loopback', self._if) self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_enable_clk_adaptive(self): self.log.info(' [%d]:Enable adaptive clock', self._if) self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_disable_clk_adaptive(self): self.log.info(' [%d]:Disable adaptive clock', self._if) self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_enable_clk_3phase(self): self.log.info(' [%d]:Enable 3-phase clock', self._if) self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_disable_clk_3phase(self): self.log.info(' [%d]:Disable 3-phase clock', self._if) self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_drive_zero(self): if len(self._trace_tx) < 3: return False value, = sunpack('H', self._trace_tx[1:3]) self.log.info(' [%d]:Open collector [15:0] %04x %s', self._if, value, self.bitfmt(value, 16)) self._trace_tx[:] = self._trace_tx[3:] return True def _cmd_send_immediate(self): self.log.debug(' [%d]:Send immediate', self._if) self._trace_tx[:] = self._trace_tx[1:] return True def _cmd_get_bits_low(self): self._trace_tx[:] = self._trace_tx[1:] self._expect_resp.append(1) return True def _cmd_get_bits_high(self): self._trace_tx[:] = self._trace_tx[1:] self._expect_resp.append(1) return True def _cmd_set_bits_low(self): if len(self._trace_tx) < 3: return False value, direction = sunpack('BB', self._trace_tx[1:3]) self.log.info(' [%d]:Set gpio[7:0] %02x %s', self._if, value, self.bm2str(value, direction)) self._trace_tx[:] = self._trace_tx[3:] return True def _cmd_set_bits_high(self): if len(self._trace_tx) < 3: return False value, direction = sunpack('BB', self._trace_tx[1:3]) self.log.info(' [%d]:Set gpio[15:8] %02x %s', self._if, value, self.bm2str(value, direction)) self._trace_tx[:] = self._trace_tx[3:] return True def _cmd_write_bytes_pve_msb(self): return self._decode_output_mpsse_bytes(currentframe().f_code.co_name) def _cmd_write_bytes_nve_msb(self): return self._decode_output_mpsse_bytes(currentframe().f_code.co_name) def _cmd_write_bytes_pve_lsb(self): return self._decode_output_mpsse_bytes(currentframe().f_code.co_name) def 
_cmd_write_bytes_nve_lsb(self): return self._decode_output_mpsse_bytes(currentframe().f_code.co_name) def _cmd_read_bytes_pve_msb(self): return self._decode_input_mpsse_byte_request() def _resp_read_bytes_pve_msb(self): return self._decode_input_mpsse_bytes(currentframe().f_code.co_name) def _cmd_read_bytes_nve_msb(self): return self._decode_input_mpsse_byte_request() def _resp_read_bytes_nve_msb(self): return self._decode_input_mpsse_bytes(currentframe().f_code.co_name) def _cmd_read_bytes_pve_lsb(self): return self._decode_input_mpsse_byte_request() def _resp_read_bytes_pve_lsb(self): return self._decode_input_mpsse_bytes(currentframe().f_code.co_name) def _cmd_read_bytes_nve_lsb(self): return self._decode_input_mpsse_byte_request() def _resp_read_bytes_nve_lsb(self): return self._decode_input_mpsse_bytes(currentframe().f_code.co_name) def _cmd_rw_bytes_nve_pve_msb(self): return self._decode_output_mpsse_bytes(currentframe().f_code.co_name, True) def _resp_rw_bytes_nve_pve_msb(self): return self._decode_input_mpsse_bytes(currentframe().f_code.co_name) def _cmd_rw_bytes_pve_nve_msb(self): return self._decode_output_mpsse_bytes(currentframe().f_code.co_name, True) def _resp_rw_bytes_pve_nve_msb(self): return self._decode_input_mpsse_bytes(currentframe().f_code.co_name) def _cmd_write_bits_pve_msb(self): return self._decode_output_mpsse_bits(currentframe().f_code.co_name) def _cmd_write_bits_nve_msb(self): return self._decode_output_mpsse_bits(currentframe().f_code.co_name) def _cmd_write_bits_pve_lsb(self): return self._decode_output_mpsse_bits(currentframe().f_code.co_name) def _cmd_write_bits_nve_lsb(self): return self._decode_output_mpsse_bits(currentframe().f_code.co_name) def _cmd_read_bits_pve_msb(self): return self._decode_input_mpsse_bit_request() def _resp_read_bits_pve_msb(self): return self._decode_input_mpsse_bits(currentframe().f_code.co_name) def _cmd_read_bits_nve_msb(self): return self._decode_input_mpsse_bit_request() def _resp_read_bits_nve_msb(self): return self._decode_input_mpsse_bits(currentframe().f_code.co_name) def _cmd_read_bits_pve_lsb(self): return self._decode_input_mpsse_bit_request() def _resp_read_bits_pve_lsb(self): return self._decode_input_mpsse_bits(currentframe().f_code.co_name) def _cmd_read_bits_nve_lsb(self): return self._decode_input_mpsse_bit_request() def _resp_read_bits_nve_lsb(self): return self._decode_input_mpsse_bits(currentframe().f_code.co_name) def _cmd_rw_bits_nve_pve_msb(self): return self._decode_output_mpsse_bits(currentframe().f_code.co_name, True) def _resp_rw_bits_nve_pve_msb(self): return self._decode_input_mpsse_bits(currentframe().f_code.co_name) def _cmd_rw_bits_pve_nve_msb(self): return self._decode_output_mpsse_bits(currentframe().f_code.co_name, True) def _resp_rw_bits_pve_nve_msb(self): return self._decode_input_mpsse_bits(currentframe().f_code.co_name) def _resp_get_bits_low(self): if self._trace_rx: return False value = self._trace_rx[0] self.log.info(' [%d]:Get gpio[7:0] %02x %s', self._if, value, self.bm2str(value, 0xFF)) self._trace_rx[:] = self._trace_rx[1:] return True def _resp_get_bits_high(self): if self._trace_rx: return False value = self._trace_rx[0] self.log.info(' [%d]:Get gpio[15:8] %02x %s', self._if, value, self.bm2str(value, 0xFF)) self._trace_rx[:] = self._trace_rx[1:] return True def _decode_output_mpsse_bytes(self, caller, expect_rx=False): if len(self._trace_tx) < 4: return False length = sunpack('<H', self._trace_tx[1:3])[0] + 1 if len(self._trace_tx) < 4 + length: return False if expect_rx: 
self._expect_resp.append(length) payload = self._trace_tx[3:3+length] funcname = caller[5:].title().replace('_', '') self.log.info(' [%d]:%s> (%d) %s', self._if, funcname, length, hexlify(payload).decode('utf8')) self._trace_tx[:] = self._trace_tx[3+length:] return True def _decode_output_mpsse_bits(self, caller, expect_rx=False): if len(self._trace_tx) < 3: return False bitlen = self._trace_tx[1] + 1 if expect_rx: self._expect_resp.append(-bitlen) payload = self._trace_tx[2] funcname = caller[5:].title().replace('_', '') msb = caller[5:][-3].lower() == 'm' self.log.info(' %s> (%d) %s', funcname, bitlen, self.bit2str(payload, bitlen, msb)) self._trace_tx[:] = self._trace_tx[3:] return True def _decode_input_mpsse_byte_request(self): if len(self._trace_tx) < 3: return False length = sunpack('<H', self._trace_tx[1:3])[0] + 1 self._expect_resp.append(length) self._trace_tx[:] = self._trace_tx[3:] return True def _decode_input_mpsse_bit_request(self): if len(self._trace_tx) < 2: return False bitlen = self._trace_tx[1] + 1 self._expect_resp.append(-bitlen) self._trace_tx[:] = self._trace_tx[2:] return True def _decode_input_mpsse_bytes(self, caller): if not self._expect_resp: self.log.warning('[%d]:Response w/o request?', self._if) return False if self._expect_resp[0] < 0: self.log.warning('[%d]:Handling byte request w/ bit length', self._if) return False if len(self._trace_rx) < self._expect_resp[0]: # peek return False length = self._expect_resp.popleft() payload = self._trace_rx[:length] self._trace_rx[:] = self._trace_rx[length:] funcname = caller[5:].title().replace('_', '') self.log.info(' %s< (%d) %s', funcname, length, hexlify(payload).decode('utf8')) return True def _decode_input_mpsse_bits(self, caller): if not self._expect_resp: self.log.warning('[%d]:Response w/o request?', self._if) return False if not self._trace_rx: # peek return False if self._expect_resp[0] > 0: self.log.warning('[%d]:Handling bit request w/ byte length', self._if) bitlen = -self._expect_resp.popleft() payload = self._trace_rx[0] self._trace_rx[:] = self._trace_rx[1:] funcname = caller[5:].title().replace('_', '') msb = caller[5:][-3].lower() == 'm' self.log.info(' %s< (%d) %s', funcname, bitlen, self.bit2str(payload, bitlen, msb)) return True @classmethod def bit2str(cls, value: int, count: int, msb: bool, hiz: str = '_') -> str: mask = (1 << count) - 1 if msb: mask <<= 8 - count return cls.bm2str(value, mask, hiz) @classmethod def bm2str(cls, value: int, mask: int, hiz: str = '_') -> str: vstr = cls.bitfmt(value, 8) mstr = cls.bitfmt(mask, 8) return ''.join([m == '1' and v or hiz for v, m in zip(vstr, mstr)]) @classmethod def bitfmt(cls, value, width): return format(value, '0%db' % width) # rw_bytes_pve_pve_lsb # rw_bytes_pve_nve_lsb # rw_bytes_nve_pve_lsb # rw_bytes_nve_nve_lsb # rw_bits_pve_pve_lsb # rw_bits_pve_nve_lsb # rw_bits_nve_pve_lsb # rw_bits_nve_nve_lsb # write_bits_tms_pve # write_bits_tms_nve # rw_bits_tms_pve_pve # rw_bits_tms_nve_pve # rw_bits_tms_pve_nve # rw_bits_tms_nve_nve
8,866
1,303
<reponame>miusuncle/TBase /* * Tencent is pleased to support the open source community by making TBase available. * * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. * * TBase is licensed under the BSD 3-Clause License, except for the third-party component listed below. * * A copy of the BSD 3-Clause License is included in this file. * * Other dependencies and licenses: * * Open Source Software Licensed Under the PostgreSQL License: * -------------------------------------------------------------------- * 1. Postgres-XL XL9_5_STABLE * Portions Copyright (c) 2015-2016, 2ndQuadrant Ltd * Portions Copyright (c) 2012-2015, TransLattice, Inc. * Portions Copyright (c) 2010-2017, Postgres-XC Development Group * Portions Copyright (c) 1996-2015, The PostgreSQL Global Development Group * Portions Copyright (c) 1994, The Regents of the University of California * * Terms of the PostgreSQL License: * -------------------------------------------------------------------- * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, and without a written agreement * is hereby granted, provided that the above copyright notice and this * paragraph and the following two paragraphs appear in all copies. * * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * * * Terms of the BSD 3-Clause License: * -------------------------------------------------------------------- * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of THL A29 Limited nor the names of its contributors may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* */ /* Name: imath.h Purpose: Arbitrary precision integer arithmetic routines. Author: <NAME> <http://spinning-yarns.org/michael/sw/> Info: Id: imath.h 21 2006-04-02 18:58:36Z sting Copyright (C) 2002 <NAME>, All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* contrib/pgcrypto/imath.h */ #ifndef IMATH_H_ #define IMATH_H_ /* use always 32bit digits - should some arch use 16bit digits? */ #define USE_LONG_LONG #include <limits.h> typedef unsigned char mp_sign; typedef unsigned int mp_size; typedef int mp_result; #ifdef USE_LONG_LONG typedef uint32 mp_digit; typedef uint64 mp_word; #define MP_DIGIT_MAX 0xFFFFFFFFULL #define MP_WORD_MAX 0xFFFFFFFFFFFFFFFFULL #else typedef uint16 mp_digit; typedef uint32 mp_word; #define MP_DIGIT_MAX 0xFFFFUL #define MP_WORD_MAX 0xFFFFFFFFUL #endif typedef struct mpz { mp_digit *digits; mp_size alloc; mp_size used; mp_sign sign; } mpz_t , *mp_int; #define MP_DIGITS(Z) ((Z)->digits) #define MP_ALLOC(Z) ((Z)->alloc) #define MP_USED(Z) ((Z)->used) #define MP_SIGN(Z) ((Z)->sign) extern const mp_result MP_OK; extern const mp_result MP_FALSE; extern const mp_result MP_TRUE; extern const mp_result MP_MEMORY; extern const mp_result MP_RANGE; extern const mp_result MP_UNDEF; extern const mp_result MP_TRUNC; extern const mp_result MP_BADARG; #define MP_DIGIT_BIT (sizeof(mp_digit) * CHAR_BIT) #define MP_WORD_BIT (sizeof(mp_word) * CHAR_BIT) #define MP_MIN_RADIX 2 #define MP_MAX_RADIX 36 extern const mp_sign MP_NEG; extern const mp_sign MP_ZPOS; #define mp_int_is_odd(Z) ((Z)->digits[0] & 1) #define mp_int_is_even(Z) !((Z)->digits[0] & 1) mp_size mp_get_default_precision(void); void mp_set_default_precision(mp_size s); mp_size mp_get_multiply_threshold(void); void mp_set_multiply_threshold(mp_size s); mp_result mp_int_init(mp_int z); mp_int mp_int_alloc(void); mp_result mp_int_init_size(mp_int z, mp_size prec); mp_result mp_int_init_copy(mp_int z, mp_int old); mp_result mp_int_init_value(mp_int z, int value); mp_result mp_int_set_value(mp_int z, int value); void mp_int_clear(mp_int z); void mp_int_free(mp_int z); mp_result mp_int_copy(mp_int a, mp_int c); /* c = a */ void mp_int_swap(mp_int a, mp_int c); /* swap a, c */ void mp_int_zero(mp_int z); /* z = 0 */ mp_result mp_int_abs(mp_int a, mp_int c); /* c = |a| */ mp_result mp_int_neg(mp_int a, mp_int c); /* c = -a */ mp_result mp_int_add(mp_int a, mp_int b, mp_int c); /* c = a + b */ mp_result mp_int_add_value(mp_int a, int value, mp_int c); mp_result mp_int_sub(mp_int a, mp_int b, mp_int c); /* c = a - b */ mp_result mp_int_sub_value(mp_int a, 
int value, mp_int c); mp_result mp_int_mul(mp_int a, mp_int b, mp_int c); /* c = a * b */ mp_result mp_int_mul_value(mp_int a, int value, mp_int c); mp_result mp_int_mul_pow2(mp_int a, int p2, mp_int c); mp_result mp_int_sqr(mp_int a, mp_int c); /* c = a * a */ mp_result mp_int_div(mp_int a, mp_int b, /* q = a / b */ mp_int q, mp_int r); /* r = a % b */ mp_result mp_int_div_value(mp_int a, int value, /* q = a / value */ mp_int q, int *r); /* r = a % value */ mp_result mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */ mp_int q, mp_int r); /* r = q % 2^p2 */ mp_result mp_int_mod(mp_int a, mp_int m, mp_int c); /* c = a % m */ #define mp_int_mod_value(A, V, R) mp_int_div_value((A), (V), 0, (R)) mp_result mp_int_expt(mp_int a, int b, mp_int c); /* c = a^b */ mp_result mp_int_expt_value(int a, int b, mp_int c); /* c = a^b */ int mp_int_compare(mp_int a, mp_int b); /* a <=> b */ int mp_int_compare_unsigned(mp_int a, mp_int b); /* |a| <=> |b| */ int mp_int_compare_zero(mp_int z); /* a <=> 0 */ int mp_int_compare_value(mp_int z, int value); /* a <=> v */ /* Returns true if v|a, false otherwise (including errors) */ int mp_int_divisible_value(mp_int a, int v); /* Returns k >= 0 such that z = 2^k, if one exists; otherwise < 0 */ int mp_int_is_pow2(mp_int z); mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m, mp_int c); /* c = a^b (mod m) */ mp_result mp_int_exptmod_evalue(mp_int a, int value, mp_int m, mp_int c); /* c = a^v (mod m) */ mp_result mp_int_exptmod_bvalue(int value, mp_int b, mp_int m, mp_int c); /* c = v^b (mod m) */ mp_result mp_int_exptmod_known(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c); /* c = a^b (mod m) */ mp_result mp_int_redux_const(mp_int m, mp_int c); mp_result mp_int_invmod(mp_int a, mp_int m, mp_int c); /* c = 1/a (mod m) */ mp_result mp_int_gcd(mp_int a, mp_int b, mp_int c); /* c = gcd(a, b) */ mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c, /* c = gcd(a, b) */ mp_int x, mp_int y); /* c = ax + by */ mp_result mp_int_sqrt(mp_int a, mp_int c); /* c = floor(sqrt(q)) */ /* Convert to an int, if representable (returns MP_RANGE if not). */ mp_result mp_int_to_int(mp_int z, int *out); /* Convert to nul-terminated string with the specified radix, writing at most limit characters including the nul terminator */ mp_result mp_int_to_string(mp_int z, mp_size radix, char *str, int limit); /* Return the number of characters required to represent z in the given radix. May over-estimate. */ mp_result mp_int_string_len(mp_int z, mp_size radix); /* Read zero-terminated string into z */ mp_result mp_int_read_string(mp_int z, mp_size radix, const char *str); mp_result mp_int_read_cstring(mp_int z, mp_size radix, const char *str, char **end); /* Return the number of significant bits in z */ mp_result mp_int_count_bits(mp_int z); /* Convert z to two's complement binary, writing at most limit bytes */ mp_result mp_int_to_binary(mp_int z, unsigned char *buf, int limit); /* Read a two's complement binary value into z from the given buffer */ mp_result mp_int_read_binary(mp_int z, unsigned char *buf, int len); /* Return the number of bytes required to represent z in binary. 
*/ mp_result mp_int_binary_len(mp_int z); /* Convert z to unsigned binary, writing at most limit bytes */ mp_result mp_int_to_unsigned(mp_int z, unsigned char *buf, int limit); /* Read an unsigned binary value into z from the given buffer */ mp_result mp_int_read_unsigned(mp_int z, unsigned char *buf, int len); /* Return the number of bytes required to represent z as unsigned output */ mp_result mp_int_unsigned_len(mp_int z); /* Return a statically allocated string describing error code res */ const char *mp_error_string(mp_result res); #if 0 void s_print(char *tag, mp_int z); void s_print_buf(char *tag, mp_digit *buf, mp_size num); #endif #endif /* end IMATH_H_ */
4,622
341
#include "cpp-utilities/time_code.h" #include <iostream> #include <cstdio> int main() { constexpr int Count = 10; using ms = std::chrono::microseconds; auto time1 = time_code<ms, Count>([]() { printf("Hello World\n"); }); std::cerr << "printf(\"Hello World\\n\") x 10, Took: " << time1.count() << " \xC2\xB5s to execute." << std::endl; auto time2 = time_code_once<ms>([]() { printf("Hello World\n"); }); std::cerr << "printf(\"Hello World\\n\") once, Took: " << time2.count() << " \xC2\xB5s to execute." << std::endl; }
228
427
// RUN: %clang_cc1 -fsyntax-only -verify -ftemplate-backtrace-limit 2 %s
//
// FIXME: Disable this test when Clang was built with ASan, because ASan
// increases our per-frame stack usage enough that this test no longer fits
// within our normal stack space allocation.
// REQUIRES: not_asan

template<int N, typename T> struct X : X<N+1, T*> {};
// expected-error-re@8 {{recursive template instantiation exceeded maximum depth of 1024{{$}}}}
// expected-note@8 {{instantiation of template class}}
// expected-note@8 {{skipping 1023 contexts in backtrace}}
// expected-note@8 {{use -ftemplate-depth=N to increase recursive template instantiation depth}}

X<0, int> x; // expected-note {{in instantiation of}}

// FIXME: It crashes. Investigating.
// UNSUPPORTED: mingw32
237
419
<reponame>bt3ze/pcflib<filename>betteryao/OTExtension/util/Miracl/dssign.cpp /* * Digital Signature Algorithm (DSA) * * See Communications ACM July 1992, Vol. 35 No. 7 * This new standard for digital signatures has been proposed by * the American National Institute of Standards and Technology (NIST) * under advisement from the National Security Agency (NSA). * * This program asks for the name of a <file>, computes its message digest, * signs it, and outputs the signature to a file <file>.dss. It is assumed * that the common values p, q and g, as well as the private key of the * signer have been previously generated by the dssgen program * * Requires: big.cpp */ #include <iostream> #include <cstring> #include <fstream> #include "big.h" using namespace std; Miracl precision(200,256); void strip(char *name) { /* strip off filename extension */ int i; for (i=0;name[i]!='\0';i++) { if (name[i]!='.') continue; name[i]='\0'; break; } } static Big hash(ifstream &fp) { /* compute hash function */ char ch,s[20]; int i; Big h; sha sh; shs_init(&sh); forever { /* read in bytes from message file */ fp.get(ch); if (fp.eof()) break; shs_process(&sh,ch); } shs_hash(&sh,s); h=from_binary(20,s); return h; } int main() { ifstream common("common.dss"); /* construct file I/O streams */ ifstream private_key("private.dss"); ifstream message; ofstream signature; char ifname[13],ofname[13]; Big p,q,g,x,r,s,k,h; long seed; int bits; miracl *mip=&precision; /* randomise */ cout << "Enter 9 digit random number seed = "; cin >> seed; irand(seed); /* get common data */ common >> bits; mip->IOBASE=16; common >> p >> q >> g; mip->IOBASE=10; /* calculate r - this can be done off-line, and hence amortized to almost nothing */ k=rand(q); r=pow(g,k,p); /* see brick.cpp for method to speed this up */ r%=q; /* get private key of signer */ private_key >> x; /* get message to be signed */ cout << "file to be signed = " ; cin >> ifname; strcpy(ofname,ifname); strip(ofname); strcat(ofname,".dss"); message.open(ifname,ios::binary|ios::in); if (!message) { cout << "Unable to open file " << ifname << "\n"; return 0; } h=hash(message); /* calculate s */ k=inverse(k,q); s=((h+x*r)*k)%q; signature.open(ofname); signature << r << endl; signature << s << endl; return 0; }
1,082
15,577
<reponame>pdv-ru/ClickHouse<gh_stars>1000+
#pragma once

#include <Parsers/IParserBase.h>

namespace DB
{

/// Parses data type as ASTFunction
/// Examples: Int8, Array(Nullable(FixedString(16))), DOUBLE PRECISION, Nested(UInt32 CounterID, FixedString(2) UserAgentMajor)
class ParserDataType : public IParserBase
{
protected:
    const char * getName() const override { return "data type"; }
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};

}
164
567
//
// Copyright 2020 BigQuery Utils
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#ifndef ZETASQL_HELPER_LOCAL_SERVICE_LOCAL_SERVICE_GRPC_H_
#define ZETASQL_HELPER_LOCAL_SERVICE_LOCAL_SERVICE_GRPC_H_

#include "zetasql_helper/local_service/local_service.grpc.pb.h"
#include "zetasql_helper/local_service/local_service.pb.h"
#include "zetasql_helper/local_service/local_service.h"

namespace bigquery::utils::zetasql_helper::local_service {

// Implementation of ZetaSql Helper LocalService Grpc service.
class ZetaSqlHelperLocalServiceGrpcImpl : public ZetaSqlHelperLocalService::Service {
public:
  grpc::Status Tokenize(grpc::ServerContext* context,
                        const TokenizeRequest* request,
                        TokenizeResponse* response) override;

  grpc::Status ExtractFunctionRange(grpc::ServerContext* context,
                                    const ExtractFunctionRangeRequest* request,
                                    ExtractFunctionRangeResponse* response) override;

  grpc::Status LocateTableRanges(grpc::ServerContext* context,
                                 const LocateTableRangesRequest* request,
                                 LocateTableRangesResponse* response) override;

  grpc::Status GetAllKeywords(grpc::ServerContext* context,
                              const GetAllKeywordsRequest* request,
                              GetAllKeywordsResponse* response) override;

  grpc::Status FixColumnNotGrouped(grpc::ServerContext* context,
                                   const FixColumnNotGroupedRequest* request,
                                   FixColumnNotGroupedResponse* response) override;

  grpc::Status FixDuplicateColumns(grpc::ServerContext* context,
                                   const FixDuplicateColumnsRequest* request,
                                   FixDuplicateColumnsResponse* response) override;

private:
  ZetaSqlHelperLocalServiceImpl service_;
};

} // bigquery::utils::zetasql_helper::local_service

#endif // ZETASQL_HELPER_LOCAL_SERVICE_LOCAL_SERVICE_GRPC_H_
907
655
<gh_stars>100-1000
import tensorflow as tf


class Add2DCell(tf.contrib.rnn.RNNCell):
    """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""

    def __init__(self, num_units, channels):
        self._num_units = num_units
        self._channels = channels

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    # inputs: [batch_size, height, width, channels]
    # state:  [batch_size, height, width, num_units]
    def __call__(self, inputs, state, step, scope=None):
        with tf.variable_scope(scope or type(self).__name__):  # "Add2DCell"
            new_h = (inputs + step * state) / (step + 1)
            return new_h, new_h
316
348
{"nom":"Flaux","circ":"6ème circonscription","dpt":"Gard","inscrits":286,"abs":141,"votants":145,"blancs":18,"nuls":1,"exp":126,"res":[{"nuance":"REM","nom":"<NAME>","voix":71},{"nuance":"FN","nom":"<NAME>","voix":55}]}
87
2,215
<gh_stars>1000+ # # Copyright (c) 2022 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # """ Deep autoencoding Gaussian mixture model for anomaly detection (DAGMM) """ import copy import random from typing import List try: import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader except ImportError as e: err = ( "Try installing Merlion with optional dependencies using `pip install salesforce-merlion[deep-learning]` or " "`pip install `salesforce-merlion[all]`" ) raise ImportError(str(e) + ". " + err) import numpy as np import pandas as pd from merlion.utils import UnivariateTimeSeries, TimeSeries from merlion.models.base import NormalizingConfig from merlion.models.anomaly.base import DetectorBase, DetectorConfig, MultipleTimeseriesDetectorMixin from merlion.post_process.threshold import AggregateAlarms from merlion.utils.misc import ProgressBar, initializer from merlion.models.anomaly.utils import InputData, batch_detect class DAGMMConfig(DetectorConfig, NormalizingConfig): """ Configuration class for DAGMM. The normalization is inherited from `NormalizingConfig`. The input data will be standardized automatically. """ _default_threshold = AggregateAlarms(alm_threshold=2.5, abs_score=True) @initializer def __init__( self, gmm_k: int = 3, hidden_size: int = 5, sequence_len: int = 1, lambda_energy: float = 0.1, lambda_cov_diag: float = 0.005, lr: float = 1e-3, batch_size: int = 256, num_epochs: int = 10, **kwargs ): """ :param gmm_k: The number of Gaussian distributions :param hidden_size: The hidden size of the autoencoder module in DAGMM :param sequence_len: The input series length, e.g., input = [x(t-sequence_len+1)...,x(t-1),x(t)] :param lambda_energy: The regularization weight for the energy term :param lambda_cov_diag: The regularization weight for the covariance diagonal entries :param lr: The learning rate during training :param batch_size: The batch size during training :param num_epochs: The number of training epochs """ super().__init__(**kwargs) class DAGMM(DetectorBase, MultipleTimeseriesDetectorMixin): """ Deep autoencoding Gaussian mixture model for anomaly detection (DAGMM). DAGMM combines an autoencoder with a Gaussian mixture model to model the distribution of the reconstruction errors. DAGMM jointly optimizes the parameters of the deep autoencoder and the mixture model simultaneously in an end-to-end fashion. - paper: `<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Deep Autoencoding Gaussian Mixture Model for Unsupervised Anomaly Detection. 2018. <https://openreview.net/forum?id=BJJLHbb0->`_. 
""" config_class = DAGMMConfig _default_train_config = dict() def __init__(self, config: DAGMMConfig): super().__init__(config) self.gmm_k = config.gmm_k self.hidden_size = config.hidden_size self.sequence_length = config.sequence_len self.lambda_energy = config.lambda_energy self.lambda_cov_diag = config.lambda_cov_diag self.lr = config.lr self.batch_size = config.batch_size self.num_epochs = config.num_epochs self.device = "cuda" if torch.cuda.is_available() else "cpu" self.data_dim = -1 self.dagmm, self.optimizer = None, None self.train_energy, self._threshold = None, None @property def require_even_sampling(self) -> bool: return False @property def require_univariate(self) -> bool: return False def _build_model(self, dim): hidden_size = self.hidden_size + int(dim / 20) dagmm = DAGMMModule( autoencoder=AEModule(n_features=dim, sequence_length=self.sequence_length, hidden_size=hidden_size), n_gmm=self.gmm_k, latent_dim=hidden_size + 2, device=self.device, ) return dagmm def _step(self, input_data, max_grad_norm=5): enc, dec, z, gamma = self.dagmm(input_data) total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_func( x=input_data, recon_x=dec, z=z, gamma=gamma, lambda_energy=self.lambda_energy, lambda_cov_diag=self.lambda_cov_diag, ) self.optimizer.zero_grad() total_loss = torch.clamp(total_loss, max=1e7) total_loss.backward() torch.nn.utils.clip_grad_norm_(self.dagmm.parameters(), max_grad_norm) self.optimizer.step() return total_loss, sample_energy, recon_error, cov_diag def _train(self, train_data: pd.DataFrame, train_config=None): index = train_data.index train_data = train_data.values dataset = InputData(train_data, k=self.sequence_length) data_loader = DataLoader( dataset=dataset, batch_size=self.batch_size, shuffle=True, collate_fn=InputData.collate_func ) if self.dagmm is None and self.optimizer is None: self.dagmm = self._build_model(train_data.shape[1]).to(self.device) self.optimizer = torch.optim.Adam(self.dagmm.parameters(), lr=self.lr) self.dagmm.train() self.data_dim = train_data.shape[1] bar = ProgressBar(total=self.num_epochs) for epoch in range(self.num_epochs): total_loss, recon_error = 0, 0 for input_data in data_loader: input_data = input_data.to(self.device) loss, _, error, _ = self._step(input_data.float()) total_loss += loss recon_error += error if bar is not None: bar.print( epoch + 1, prefix="", suffix="Complete, Loss {:.4f}, Recon_error: {:.4f}".format( total_loss / len(data_loader), recon_error / len(data_loader) ), ) return pd.DataFrame(batch_detect(self, train_data), index=index, columns=["anom_score"]) def _detect(self, X): """ :param X: The input time series, a numpy array. 
""" self.dagmm.eval() dataset = InputData(X, k=self.sequence_length) data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False) test_energy = np.full((self.sequence_length, X.shape[0]), np.nan) for i, sequence in enumerate(data_loader): sequence = sequence.to(self.device) enc, dec, z, gamma = self.dagmm(sequence.float()) sample_energy, _ = self.dagmm.compute_energy(z, size_average=False) idx = (i % self.sequence_length, np.arange(i, i + self.sequence_length)) test_energy[idx] = sample_energy.cpu().data.numpy() test_energy = np.nanmean(test_energy, axis=0) return test_energy def _get_sequence_len(self): return self.sequence_length def train_multiple( self, multiple_train_data: List[TimeSeries], anomaly_labels: List[TimeSeries] = None, train_config=None, post_rule_train_config=None, ) -> List[TimeSeries]: """ Trains the anomaly detector (unsupervised) and its post-rule (supervised, if labels are given) on the input multiple time series. :param multiple_train_data: a list of `TimeSeries` of metric values to train the model. :param anomaly_labels: a list of `TimeSeries` indicating which timestamps are anomalous. Optional. :param train_config: Additional training config dict with keys: * | "n_epochs": ``int`` indicating how many times the model must be | trained on the timeseries in ``multiple_train_data``. Defaults to 1. * | "shuffle": ``bool`` indicating if the ``multiple_train_data`` collection | should be shuffled before every epoch. Defaults to True if "n_epochs" > 1. :param post_rule_train_config: The config to use for training the model's post-rule. The model's default post-rule train config is used if none is supplied here. :return: A list of `TimeSeries` of the model's anomaly scores on the training data with each element corresponds to time series from ``multiple_train_data``. """ if train_config is None: train_config = copy.deepcopy(self._default_train_config) n_epochs = train_config.pop("n_epochs", 1) shuffle = train_config.pop("shuffle", n_epochs > 1) if anomaly_labels is not None: assert len(multiple_train_data) == len(anomaly_labels) else: anomaly_labels = [None] * len(multiple_train_data) train_scores_list = [] for _ in range(n_epochs): if shuffle: random.shuffle(multiple_train_data) for train_data, anomaly_series in zip(multiple_train_data, anomaly_labels): train_scores_list.append( self.train( train_data=train_data, anomaly_labels=anomaly_series, train_config=train_config, post_rule_train_config=post_rule_train_config # FIXME: the post-rule (calibrator and threshold) is trained individually on each time series # but ideally it needs to be re-trained on all of the `train_scores_list` ) ) return train_scores_list def _get_anomaly_score(self, time_series: pd.DataFrame, time_series_prev: pd.DataFrame = None) -> pd.DataFrame: ts = pd.concat((time_series_prev, time_series)) if time_series_prev is None else time_series scores = batch_detect(self, ts.values) return pd.DataFrame(scores[-len(time_series) :], index=time_series.index) class AEModule(nn.Module): """ The autoencoder module used in DAGMM. 
:meta private: """ def __init__(self, n_features, sequence_length, hidden_size, activation=nn.Tanh): """ :param n_features: The number of the input features (number of variables) :param sequence_length: The length of the input sequence :param hidden_size: The latent size :param activation: The activation function for the hidden layers """ super().__init__() input_length = n_features * sequence_length dec_steps = 2 ** np.arange(max(np.ceil(np.log2(hidden_size)), 2), np.log2(input_length))[1:] dec_setup = np.concatenate([[hidden_size], dec_steps.repeat(2), [input_length]]) enc_setup = dec_setup[::-1] layers = np.array([[nn.Linear(int(a), int(b)), activation()] for a, b in enc_setup.reshape(-1, 2)]) self.encoder = nn.Sequential(*layers.flatten()[:-1]) layers = np.array([[nn.Linear(int(a), int(b)), activation()] for a, b in dec_setup.reshape(-1, 2)]) self.decoder = nn.Sequential(*layers.flatten()[:-1]) def forward(self, x, return_latent=False): enc = self.encoder(x.view(x.shape[0], -1).float()) dec = self.decoder(enc) recon_x = dec.view(x.shape) return (recon_x, enc) if return_latent else recon_x class DAGMMModule(nn.Module): """ The DAGMM module used in the DAGMM detector. :meta private: """ def __init__(self, autoencoder, n_gmm, latent_dim, device): """ :param autoencoder: The autoencoder model :param n_gmm: The number of Gaussian mixtures :param latent_dim: The latent dimension :param device: CUDA or CPU """ super(DAGMMModule, self).__init__() self.add_module("autoencoder", autoencoder) self.device = device self.estimation = nn.Sequential( *[nn.Linear(latent_dim, 10), nn.Tanh(), nn.Linear(10, n_gmm), nn.Softmax(dim=1)] ) self.register_buffer("phi", torch.zeros(n_gmm)) self.register_buffer("mu", torch.zeros(n_gmm, latent_dim)) self.register_buffer("cov", torch.zeros(n_gmm, latent_dim, latent_dim)) @staticmethod def relative_euclidean_distance(a, b, dim=1): return (a - b).norm(2, dim=dim) / torch.clamp(a.norm(2, dim=dim), min=1e-10) def forward(self, x): dec, enc = self.autoencoder(x, return_latent=True) a, b = x.view(x.shape[0], -1), dec.view(dec.shape[0], -1) cos_distance = F.cosine_similarity(a, b, dim=1).unsqueeze(-1) euc_distance = DAGMMModule.relative_euclidean_distance(a, b, dim=1).unsqueeze(-1) z = torch.cat([enc, euc_distance, cos_distance], dim=1) return enc, dec, z, self.estimation(z) def compute_gmms(self, z, gamma): # weights sum_gamma = torch.sum(gamma, dim=0) phi = sum_gamma / gamma.shape[0] # means and covariances mu = torch.sum(gamma.unsqueeze(-1) * z.unsqueeze(1), dim=0) / sum_gamma.unsqueeze(-1) z_mu = z.unsqueeze(1) - mu.unsqueeze(0) z_mu_outer = z_mu.unsqueeze(-1) * z_mu.unsqueeze(-2) cov = torch.sum(gamma.unsqueeze(-1).unsqueeze(-1) * z_mu_outer, dim=0) / sum_gamma.unsqueeze(-1).unsqueeze(-1) # store these values for prediction self.phi, self.mu, self.cov = phi.data, mu.data, cov.data return phi, mu, cov def compute_energy(self, z, phi=None, mu=None, cov=None, size_average=True, eps=1e-6): phi = self.phi if phi is None else phi mu = self.mu if mu is None else mu cov = self.cov if cov is None else cov cov_inv, cov_det, cov_diag = [], [], 0 for i in range(cov.shape[0]): cov_k = cov[i] + torch.eye(cov.shape[1], device=self.device) * eps inv_k = torch.FloatTensor(np.linalg.pinv(cov_k.cpu().data.numpy())).to(self.device) cov_inv.append(inv_k.unsqueeze(0)) eigenvalues = np.linalg.eigvals(cov_k.data.cpu().numpy() * (2 * np.pi)) determinant = np.prod(np.clip(eigenvalues, a_min=eps, a_max=None)) cov_det.append(determinant) cov_diag += torch.sum(1.0 / cov_k.diag()) z_mu = 
z.unsqueeze(1) - mu.unsqueeze(0) cov_inv = torch.cat(cov_inv, dim=0) cov_det = torch.FloatTensor(cov_det).to(self.device) exp_term_tmp = -0.5 * torch.sum(torch.sum(z_mu.unsqueeze(-1) * cov_inv.unsqueeze(0), dim=-2) * z_mu, dim=-1) max_val = torch.max(exp_term_tmp.clamp(min=0), dim=1, keepdim=True)[0] exp_term = torch.exp(exp_term_tmp - max_val) sample_energy = -max_val.squeeze() - torch.log( torch.sum(phi.unsqueeze(0) * exp_term / (torch.sqrt(cov_det) + eps).unsqueeze(0), dim=1) + eps ) if size_average: sample_energy = torch.mean(sample_energy) return sample_energy, cov_diag def loss_func(self, x, recon_x, z, gamma, lambda_energy, lambda_cov_diag): recon_error = torch.mean((x.view(*recon_x.shape) - recon_x) ** 2) phi, mu, cov = self.compute_gmms(z, gamma) sample_energy, cov_diag = self.compute_energy(z, phi, mu, cov) loss = recon_error + lambda_energy * sample_energy + lambda_cov_diag * cov_diag return loss, sample_energy, recon_error, cov_diag
6,978
190,993
<reponame>EricRemmerswaal/tensorflow # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `op_reg_gen` module.""" # pylint: disable=missing-function-docstring # pylint: disable=invalid-name # pylint: disable=g-direct-tensorflow-import import sys from tensorflow.compiler.mlir.python.mlir_wrapper import filecheck_wrapper as fw from tensorflow.compiler.mlir.tfr.python import composite from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op from tensorflow.python.platform import test Composite = composite.Composite @composite.Composite( 'TestNoOp', derived_attrs=['T: numbertype'], outputs=['o1: T']) def _composite_no_op(): pass @Composite( 'TestCompositeOp', inputs=['x: T', 'y: T'], attrs=['act: {"", "relu"}', 'trans: bool = true'], derived_attrs=['T: numbertype'], outputs=['o1: T', 'o2: T']) def _composite_op(x, y, act, trans): return x + act, y + trans class TFRGenTensorTest(test.TestCase): """MLIR Generation Tests for MLIR TFR Program.""" def test_op_reg_gen(self): cxx_code = gen_register_op(sys.modules[__name__]) cxx_code_exp = r""" CHECK: #include "tensorflow/core/framework/op.h" CHECK-EMPTY CHECK: namespace tensorflow { CHECK-EMPTY CHECK-LABEL: REGISTER_OP("TestNoOp") CHECK-NEXT: .Attr("T: numbertype") CHECK-NEXT: .Output("o1: T"); CHECK-EMPTY CHECK-LABEL: REGISTER_OP("TestCompositeOp") CHECK-NEXT: .Input("x: T") CHECK-NEXT: .Input("y: T") CHECK-NEXT: .Attr("act: {'', 'relu'}") CHECK-NEXT: .Attr("trans: bool = true") CHECK-NEXT: .Attr("T: numbertype") CHECK-NEXT: .Output("o1: T") CHECK-NEXT: .Output("o2: T"); CHECK-EMPTY CHECK: } // namespace tensorflow """ self.assertTrue(fw.check(str(cxx_code), cxx_code_exp), str(cxx_code)) if __name__ == '__main__': test.main()
1,024
1,564
<filename>extensions/protobuf/src/main/java/org/modelmapper/protobuf/primitive/IntConverters.java<gh_stars>1000+ /* * Copyright 2018 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.modelmapper.protobuf.primitive; import com.google.protobuf.Int32Value; import com.google.protobuf.Int64Value; import org.modelmapper.Converter; import org.modelmapper.spi.MappingContext; /** * Converters for bool. * * @author <NAME> */ public class IntConverters { public static final Converter<Int32Value.Builder, Integer> BUILDER_TO_INT = new Converter<Int32Value.Builder, Integer>() { @Override public Integer convert(MappingContext<Int32Value.Builder, Integer> context) { if (context.getSource() != null) return context.getSource().getValue(); return null; } }; public static final Converter<Int64Value.Builder, Long> BUILDER_TO_LONG = new Converter<Int64Value.Builder, Long>() { @Override public Long convert(MappingContext<Int64Value.Builder, Long> context) { if (context.getSource() != null) return context.getSource().getValue(); return null; } }; public static final Converter<Int32Value, Integer> INT_VALUE_TO_INT = new Converter<Int32Value, Integer>() { @Override public Integer convert(MappingContext<Int32Value, Integer> context) { if (context.getSource() != null) return context.getSource().getValue(); return null; } }; public static final Converter<Int64Value, Long> LONG_VALUE_TO_LONG = new Converter<Int64Value, Long>() { @Override public Long convert(MappingContext<Int64Value, Long> context) { if (context.getSource() != null) return context.getSource().getValue(); return null; } }; public static final Converter<Integer, Int32Value.Builder> INT_TO_BUILDER = new Converter<Integer, Int32Value.Builder>() { @Override public Int32Value.Builder convert(MappingContext<Integer, Int32Value.Builder> context) { if (context.getSource() != null) return Int32Value.newBuilder().setValue(context.getSource()); return null; } }; public static final Converter<Long, Int64Value.Builder> LONG_TO_BUILDER = new Converter<Long, Int64Value.Builder>() { @Override public Int64Value.Builder convert(MappingContext<Long, Int64Value.Builder> context) { if (context.getSource() != null) return Int64Value.newBuilder().setValue(context.getSource()); return null; } }; public static final Converter<Integer, Int32Value> INT_TO_INT_VALUE = new Converter<Integer, Int32Value>() { @Override public Int32Value convert(MappingContext<Integer, Int32Value> context) { if (context.getSource() != null) return Int32Value.of(context.getSource()); return null; } }; public static final Converter<Long, Int64Value> LONG_TO_LONG_VALUE = new Converter<Long, Int64Value>() { @Override public Int64Value convert(MappingContext<Long, Int64Value> context) { if (context.getSource() != null) return Int64Value.of(context.getSource()); return null; } }; }
1,356
2,617
<gh_stars>1000+
// Workaround to include D3D10.h with VS2003
#ifndef __out
#define __out
#endif
#ifndef __in
#define __in
#endif
#ifndef __inout
#define __inout
#endif
#ifndef __in_opt
#define __in_opt
#endif
#ifndef __out_opt
#define __out_opt
#endif
#ifndef __inout_opt
#define __inout_opt
#endif
#ifndef __in_ecount
#define __in_ecount(x)
#endif
#ifndef __in_ecount_opt
#define __in_ecount_opt(x)
#endif
#ifndef __out_ecount
#define __out_ecount(x)
#endif
#ifndef __out_ecount_opt
#define __out_ecount_opt(x)
#endif
#ifndef __inout_ecount
#define __inout_ecount(x)
#endif
#ifndef __inout_ecount_opt
#define __inout_ecount_opt(x)
#endif
#ifndef __in_bcount_opt
#define __in_bcount_opt(x)
#endif
#ifndef __out_bcount_opt
#define __out_bcount_opt(x)
#endif
#ifndef __inout_bcount_opt
#define __inout_bcount_opt(x)
#endif
421
2,373
<gh_stars>1000+
{
  "use": "sig",
  "kty": "EC",
  "kid": "<KEY>",
  "crv": "P-256",
  "alg": "ES256",
  "x": "JtPSvIKayHsCHobDnNWtOdoroh-MDwK<KEY>",
  "y": "tP7xR5rpu6azzZsozdmzouyVByuTUDYSSAELTOAtu7g"
}
126
1,350
<filename>sdk/deploymentmanager/azure-resourcemanager-deploymentmanager/src/main/java/com/azure/resourcemanager/deploymentmanager/models/RestHealthCheckStepAttributes.java // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.deploymentmanager.models; import com.azure.core.annotation.Fluent; import com.azure.core.annotation.JsonFlatten; import com.azure.core.util.logging.ClientLogger; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonTypeInfo; import com.fasterxml.jackson.annotation.JsonTypeName; import java.util.List; /** Defines the REST health check step properties. */ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type") @JsonTypeName("REST") @JsonFlatten @Fluent public class RestHealthCheckStepAttributes extends HealthCheckStepAttributes { @JsonIgnore private final ClientLogger logger = new ClientLogger(RestHealthCheckStepAttributes.class); /* * The list of checks that form the health check step. */ @JsonProperty(value = "properties.healthChecks") private List<RestHealthCheck> healthChecks; /** * Get the healthChecks property: The list of checks that form the health check step. * * @return the healthChecks value. */ public List<RestHealthCheck> healthChecks() { return this.healthChecks; } /** * Set the healthChecks property: The list of checks that form the health check step. * * @param healthChecks the healthChecks value to set. * @return the RestHealthCheckStepAttributes object itself. */ public RestHealthCheckStepAttributes withHealthChecks(List<RestHealthCheck> healthChecks) { this.healthChecks = healthChecks; return this; } /** {@inheritDoc} */ @Override public RestHealthCheckStepAttributes withWaitDuration(String waitDuration) { super.withWaitDuration(waitDuration); return this; } /** {@inheritDoc} */ @Override public RestHealthCheckStepAttributes withMaxElasticDuration(String maxElasticDuration) { super.withMaxElasticDuration(maxElasticDuration); return this; } /** {@inheritDoc} */ @Override public RestHealthCheckStepAttributes withHealthyStateDuration(String healthyStateDuration) { super.withHealthyStateDuration(healthyStateDuration); return this; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ @Override public void validate() { super.validate(); if (healthChecks() != null) { healthChecks().forEach(e -> e.validate()); } } }
//
//  JSRequestTools.h
//  JSLoadMoreServiceDemo
//
//  Created by 乔同新 on 2017/8/7.
//  Copyright © 2017 乔同新. All rights reserved.
//  Github: https://github.com/Josin22/JSLoadMoreService

#import <Foundation/Foundation.h>

@class RACSignal, JSFileConfig;

typedef NS_ENUM(NSInteger, RequestType) {
    RequestTypeGET = 0,
    RequestTypePOST,
    RequestTypeDELETE,
    RequestTypePUT,
    RequestTypePOSTUPLOAD
};

/**
 *  A simple request wrapper built with RAC
 */
@interface JSRequestTools : NSObject

+ (RACSignal *)js_getURL:(NSString *)url para:(NSMutableDictionary *)para;

+ (RACSignal *)js_postURL:(NSString *)url para:(NSMutableDictionary *)para;

+ (RACSignal *)js_deleteURL:(NSString *)url para:(NSMutableDictionary *)para;

+ (RACSignal *)js_putURL:(NSString *)url para:(NSMutableDictionary *)para;

+ (RACSignal *)js_uploadURL:(NSString *)url para:(NSMutableDictionary *)para files:(NSMutableArray <JSFileConfig *>*)files;

@end

/**
 *  Model class used to wrap file data for upload
 */
@interface JSFileConfig : NSObject

/**
 *  File data
 */
@property (nonatomic, strong) NSData *fileData;
/**
 *  Parameter name expected by the server
 */
@property (nonatomic, copy) NSString *name;
/**
 *  File name
 */
@property (nonatomic, copy) NSString *fileName;
/**
 *  File type (MIME type)
 */
@property (nonatomic, copy) NSString *mimeType;

+ (instancetype)fileConfigWithfileData:(NSData *)fileData name:(NSString *)name fileName:(NSString *)fileName mimeType:(NSString *)mimeType;

- (instancetype)initWithfileData:(NSData *)fileData name:(NSString *)name fileName:(NSString *)fileName mimeType:(NSString *)mimeType;

@end
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package org.redkale.net.http;

import java.lang.annotation.*;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

/**
 * Used together with &#64;HttpMapping.
 * Describes a parameter of an &#64;HttpMapping method. <br>
 * For HttpMapping methods generated from a RestService, parameters annotated with &#64;RestUserid or &#64;RestAddress do not generate an HttpParam.
 *
 * <p>
 * Details: https://redkale.org
 *
 * @author zhangjx
 */
@Documented
@Target({METHOD})
@Retention(RUNTIME)
@Repeatable(HttpParam.HttpParams.class)
public @interface HttpParam {

    /**
     * Parameter name
     *
     * @return String
     */
    String name();

    /**
     * Data type of the parameter
     *
     * @return Class
     */
    Class type();

    /**
     * Name of the field in the HttpServlet that holds the parameter's generic type; that field must be of type java.lang.reflect.Type. <br>
     * Empty if the parameter type is not generic.
     *
     * @since 2.5.0
     * @return String
     */
    String typeref() default "";

    /**
     * Remark / description
     *
     * @return String
     */
    String comment() default "";

    /**
     * Source type of the parameter
     *
     * @return HttpParameterStyle
     */
    HttpParameterStyle style() default HttpParameterStyle.QUERY;

    /**
     * Radix used when converting byte/short/int/long values; defaults to base 10
     *
     * @return int
     */
    int radix() default 10;

    /**
     * Whether the parameter is required; not validated by the framework at runtime, only for OpenAPI Specification 3
     *
     * @return boolean
     */
    boolean required() default true;

    /**
     * Whether the field is deprecated, only for OpenAPI Specification 3
     *
     * @return boolean
     */
    boolean deprecated() default false;

    /**
     * for OpenAPI Specification 3
     *
     * @return String
     */
    String example() default "";

    /**
     * Used together with &#64;HttpParam.
     * Indicates the source of an &#64;HttpParam parameter.
     *
     * <p>
     * Details: https://redkale.org
     *
     * @author zhangjx
     */
    public static enum HttpParameterStyle {

        QUERY, HEADER, COOKIE, BODY;
    }

    @Documented
    @Target({METHOD})
    @Retention(RUNTIME)
    @interface HttpParams {

        HttpParam[] value();
    }
}
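A hedged usage sketch for the annotation above, not part of the original file: the servlet class, the URL, and the url/comment attributes assumed on @HttpMapping are illustrative guesses from context, and only @HttpParam attributes declared above are exercised.

import org.redkale.net.http.HttpMapping;
import org.redkale.net.http.HttpParam;
import org.redkale.net.http.HttpRequest;
import org.redkale.net.http.HttpResponse;
import org.redkale.net.http.HttpServlet;

public class UserServlet extends HttpServlet {

    // Hypothetical handler; the url/comment attributes of @HttpMapping are assumed for illustration.
    @HttpMapping(url = "/user/find", comment = "find a user by id")
    @HttpParam(name = "id", type = int.class, comment = "user id", style = HttpParam.HttpParameterStyle.QUERY)
    @HttpParam(name = "verbose", type = boolean.class, required = false, comment = "whether to return full detail")
    public void findUser(HttpRequest req, HttpResponse resp) {
        // ... read "id" and "verbose" from req, then write the result to resp ...
    }
}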
{"commands":{"count":0,"time":0},"getmore":{"count":0,"time":0},"insert":{"count":0,"time":0},"queries":{"count":1,"time":23},"readLock":{"count":1,"time":23},"remove":{"count":0,"time":0},"total":{"count":1,"time":23},"update":{"count":0,"time":0},"writeLock":{"count":0,"time":0}}
/*******************************************************************************
 * Copyright (c) 2013-2021, <NAME> <<EMAIL>>
 * All rights reserved.
 *
 * This file is a part of sc-im
 *
 * sc-im is a spreadsheet program that is based on sc. The original authors
 * of sc are <NAME> and <NAME>, and mods were later added by
 * <NAME>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by <NAME>
 *    <<EMAIL>>.
 * 4. Neither the name of the <NAME> nor the
 *    names of other contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY <NAME> ''AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL ANDRES MARTINELLI BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *******************************************************************************/

/**
 * \file ods.c
 * \author <NAME> <<EMAIL>>
 * \date 2021-03-27
 * \brief file that contains the functions to support ods file import
 *
 * \details ods import requires:
 * - libzip-dev
 * - libxml2-dev
 */

#ifdef ODS
#include <errno.h>
#include <zip.h>
#include <libxml/parser.h>
#include "../tui.h"
#include "../cmds/cmds.h"
#include "../sc.h"
#include "../utils/string.h"
#endif

extern struct session * session;

/**
 * \brief open_ods() files
 *
 * \param[in] fname
 * \param[in] encoding
 *
 * \return none
 */
int open_ods(char * fname, char * encoding) {
#ifdef ODS
    struct roman * roman = session->cur_doc;
    struct sheet * sh = roman->cur_sh;
    struct zip * za;
    struct zip_file * zf;
    struct zip_stat sb_content;
    char buf[100];
    int err;
    int len;

    // open zip file
    if ((za = zip_open(fname, 0, &err)) == NULL) {
        zip_error_to_str(buf, sizeof(buf), err, errno);
        sc_error("can't open zip archive `%s': %s", fname, buf);
        return -1;
    }

    // open content.xml
    char * name = "content.xml";
    zf = zip_fopen(za, name, ZIP_FL_UNCHANGED);
    char * content = NULL;
    if (zf) {
        // some files may not have strings
        zip_stat(za, name, ZIP_FL_UNCHANGED, &sb_content);
        content = (char *) malloc(sb_content.size);
        len = zip_fread(zf, content, sb_content.size);
        if (len < 0) {
            sc_error("cannot read file %s.\n", name);
            free(content);
            return -1;
        }
        zip_fclose(zf);
    }

    // XML parse for the sheet file
    xmlDoc * doc = NULL;

    // this initialize the library and check potential ABI mismatches
    // between the version it was compiled for and the actual shared
    // library used.
    LIBXML_TEST_VERSION

    doc = xmlReadMemory(content, sb_content.size, "noname.xml", NULL, XML_PARSE_NOBLANKS);

    if (doc == NULL) {
        sc_error("error: could not parse ods file");
        if (content != NULL) free(content);
        return -1;
    }

    // parse here
    xmlNode * cur_node = xmlDocGetRootElement(doc)->xmlChildrenNode;
    xmlNode * child_node = NULL;
    wchar_t line_interp[FBUFLEN] = L"";
    int r = 0, c = -1;

    while (cur_node != NULL && strcmp((char *) cur_node->name, "body")) cur_node = cur_node->next; // forward until reach body
    cur_node = cur_node->xmlChildrenNode;
    while (cur_node != NULL && strcmp((char *) cur_node->name, "spreadsheet")) cur_node = cur_node->next; // forward until reach spreadsheet
    cur_node = cur_node->xmlChildrenNode;
    while (cur_node != NULL && strcmp((char *) cur_node->name, "table")) cur_node = cur_node->next; // forward until reach table
    cur_node = cur_node->xmlChildrenNode;

    char * strvalue = NULL;
    char * st = NULL;
    char * strtype = NULL;
    char * value = NULL;
    char * strf;
    char * value_type = NULL;

    // here traverse table content
    while (cur_node != NULL) {
        if (! strcmp((char *) cur_node->name, "table-row")) {
            // we are inside a table-row
            // each of these is a row
            child_node = cur_node->xmlChildrenNode;
            r++;
            c = -1;

            while (child_node != NULL) {
                c++;
                if ((value_type = (char *) xmlGetProp(child_node, (xmlChar *) "value-type")) == NULL) {
                    child_node = child_node->next;
                    continue;
                };

                // each of these is table-cell (a column)
                strtype = value_type; // type

                //if (!strcmp(strtype, "time") //get time-value //TODO
                //if (!strcmp(strtype, "date") //get date-value //TODO

                if (!strcmp(strtype, "float")) {
                    char * formula = (char *) xmlGetProp(child_node, (xmlChar *) "formula");
                    if (formula != NULL) {
                        strf = str_replace (formula, "of:=", ""); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "[.", ""); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, ";", ","); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, ":.", ":"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "]", ""); strcpy(formula, strf); free(strf);

                        // we take some common function and adds a @ to them
                        strf = str_replace (formula, "COUNT", "@COUNT"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "SUM", "@SUM"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "PRODUCT", "@PROD"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "AVERAGE", "@AVG"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "MIN", "@MIN"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "MAX", "@MAX"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "ABS", "@ABS"); strcpy(formula, strf); free(strf);
                        strf = str_replace (formula, "STDEV", "@STDDEV"); strcpy(formula, strf); free(strf);

                        swprintf(line_interp, FBUFLEN, L"let %s%d=%s", coltoa(c), r, formula);
                        xmlFree(formula);
                        formula = NULL;
                    } else {
                        value = (char *) xmlGetProp(child_node, (xmlChar *) "value"); // type
                        double l = atof((char *) value);
                        swprintf(line_interp, FBUFLEN, L"let %s%d=%.15f", coltoa(c), r, l);
                        xmlFree(value);
                        value = NULL;
                    }
                    send_to_interp(line_interp);

                } else if (!strcmp(strtype, "string") && !strcmp((char *) child_node->xmlChildrenNode->name, "p")) {
                    strvalue = (char *) xmlNodeGetContent(child_node->xmlChildrenNode);
                    st = str_replace (strvalue, "\"", "''");
                    clean_carrier(st); // we handle padding
                    swprintf(line_interp, FBUFLEN, L"label %s%d=\"%s\"", coltoa(c), r, st);
                    send_to_interp(line_interp);
                    free(st);
                    xmlFree(strvalue);
                    strvalue = NULL;
                }

                child_node = child_node->next;
                xmlFree(value_type);
                value_type = NULL;
            }
        }
        cur_node = cur_node->next; // forward until reach table
    }

    int_deleterow(sh, sh->currow, 1); /* delete the first row */

    // free the document
    xmlFreeDoc(doc);

    // Free the global variables that may have been allocated by the parser
    xmlCleanupParser();

    free(content);

    // close zip file
    if (zip_close(za) == -1) {
        sc_error("cannot close zip archive `%s'", fname);
        return -1;
    }
#endif
    return 0;
}
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef ASH_PUBLIC_CPP_TAB_CLUSTER_TAB_CLUSTER_UI_CONTROLLER_H_
#define ASH_PUBLIC_CPP_TAB_CLUSTER_TAB_CLUSTER_UI_CONTROLLER_H_

#include <memory>
#include <vector>

#include "ash/public/cpp/ash_public_export.h"
#include "ash/public/cpp/tab_cluster/clusterer.h"
#include "base/observer_list.h"

namespace ash {

class TabClusterUIItem;

// TabClusterUIController:
// Manage the tab items of the opened, modified and closed tabs. When there is
// a tab item changed, it will notify its observers.
class ASH_PUBLIC_EXPORT TabClusterUIController {
 public:
  class Observer : public base::CheckedObserver {
   public:
    virtual void OnTabItemAdded(TabClusterUIItem* tab_item) = 0;
    virtual void OnTabItemUpdated(TabClusterUIItem* tab_item) = 0;
    virtual void OnTabItemRemoved(TabClusterUIItem* tab_item) = 0;
  };

  using TabItems = std::vector<std::unique_ptr<TabClusterUIItem>>;

  TabClusterUIController();
  TabClusterUIController(const TabClusterUIController&) = delete;
  TabClusterUIController& operator=(const TabClusterUIController&) = delete;
  ~TabClusterUIController();

  TabClusterUIItem* AddTabItem(std::unique_ptr<TabClusterUIItem> tab_item);
  void UpdateTabItem(TabClusterUIItem* tab_item);
  void RemoveTabItem(TabClusterUIItem* tab_item);
  void ChangeActiveCandidate(TabClusterUIItem* old_active_item,
                             TabClusterUIItem* new_active_item);

  void AddObserver(Observer* observer);
  void RemoveObserver(Observer* observer);

 private:
  Clusterer clusterer_;
  // List of tab items.
  TabItems tab_items_;

  base::ObserverList<Observer> observers_;
};

}  // namespace ash

#endif  // ASH_PUBLIC_CPP_TAB_CLUSTER_TAB_CLUSTER_UI_CONTROLLER_H_