Column            Dtype           Min     Max
max_stars_count   int64           301     224k
text              stringlengths   6       1.05M
token_count       int64           3       727k
max_stars_count: 2,216
from .ball_query import ball_query

__all__ = ['ball_query']
token_count: 21
max_stars_count: 5,169
{ "name": "ContentProvidersSQLite", "summary": "A content provider used for accessing and modifying data in SQLite database.", "version": "1.4.0", "platforms": { "ios": "9.0" }, "swift_version": "4.2", "cocoapods_version": "~> 1.5.3", "static_framework": true, "homepage": "https://github.com/roxiemobile/content-providers.ios", "authors": { "Roxie Mobile Ltd.": "<EMAIL>", "<NAME>": "<EMAIL>" }, "license": "BSD-4-Clause", "source": { "git": "https://github.com/roxiemobile/content-providers.ios.git", "tag": "1.4.0" }, "source_files": "Modules/RoxieMobile.ContentProviders/Sources/SQLite/{Sources,Dependencies}/**/*.swift", "pod_target_xcconfig": { "GCC_PREPROCESSOR_DEFINITIONS": "$(inherited) SQLITE_HAS_CODEC=1 CONTENTPROVIDERS_FRAMEWORK_VERSION=@\\\"1.4.0\\\"", "OTHER_SWIFT_FLAGS": "$(inherited) -DSQLITE_SWIFT_SQLCIPHER" }, "user_target_xcconfig": { "GCC_PREPROCESSOR_DEFINITIONS": "$(inherited) SQLITE_HAS_CODEC=1", "OTHER_SWIFT_FLAGS": "$(inherited) -DSQLITE_SWIFT_SQLCIPHER" }, "dependencies": { "CryptoSwift": [ "~> 0.13.1" ], "SwiftCommons/Concurrent": [ "~> 1.4.1" ], "SwiftCommons/Extensions": [ "~> 1.4.1" ], "SQLite.swift/SQLCipher": [ "~> 0.11.5", "< 0.11.6" ], "SQLCipher": [ ">= 3.4.2", "< 4.0.0" ] } }
token_count: 672
max_stars_count: 1,755
<gh_stars>1000+ /*========================================================================= Program: Visualization Toolkit Module: vtkCursor3D.h Copyright (c) <NAME>, <NAME>, <NAME> All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ /** * @class vtkCursor3D * @brief generate a 3D cursor representation * * vtkCursor3D is an object that generates a 3D representation of a cursor. * The cursor consists of a wireframe bounding box, three intersecting * axes lines that meet at the cursor focus, and "shadows" or projections * of the axes against the sides of the bounding box. Each of these * components can be turned on/off. * * This filter generates two output datasets. The first (Output) is just the * geometric representation of the cursor. The second (Focus) is a single * point at the focal point. */ #ifndef vtkCursor3D_h #define vtkCursor3D_h #include "vtkFiltersGeneralModule.h" // For export macro #include "vtkPolyDataAlgorithm.h" class VTKFILTERSGENERAL_EXPORT vtkCursor3D : public vtkPolyDataAlgorithm { public: vtkTypeMacro(vtkCursor3D, vtkPolyDataAlgorithm); void PrintSelf(ostream& os, vtkIndent indent) override; /** * Construct with model bounds = (-1,1,-1,1,-1,1), focal point = (0,0,0), * all parts of cursor visible, and wrapping off. */ static vtkCursor3D* New(); ///@{ /** * Set / get the boundary of the 3D cursor. */ void SetModelBounds(double xmin, double xmax, double ymin, double ymax, double zmin, double zmax); void SetModelBounds(const double bounds[6]); vtkGetVectorMacro(ModelBounds, double, 6); ///@} ///@{ /** * Set/Get the position of cursor focus. If translation mode is on, * then the entire cursor (including bounding box, cursor, and shadows) * is translated. Otherwise, the focal point will either be clamped to the * bounding box, or wrapped, if Wrap is on. (Note: this behavior requires * that the bounding box is set prior to the focal point.) */ void SetFocalPoint(double x[3]); void SetFocalPoint(double x, double y, double z) { double xyz[3]; xyz[0] = x; xyz[1] = y; xyz[2] = z; this->SetFocalPoint(xyz); } vtkGetVectorMacro(FocalPoint, double, 3); ///@} ///@{ /** * Turn on/off the wireframe bounding box. */ vtkSetMacro(Outline, vtkTypeBool); vtkGetMacro(Outline, vtkTypeBool); vtkBooleanMacro(Outline, vtkTypeBool); ///@} ///@{ /** * Turn on/off the wireframe axes. */ vtkSetMacro(Axes, vtkTypeBool); vtkGetMacro(Axes, vtkTypeBool); vtkBooleanMacro(Axes, vtkTypeBool); ///@} ///@{ /** * Turn on/off the wireframe x-shadows. */ vtkSetMacro(XShadows, vtkTypeBool); vtkGetMacro(XShadows, vtkTypeBool); vtkBooleanMacro(XShadows, vtkTypeBool); ///@} ///@{ /** * Turn on/off the wireframe y-shadows. */ vtkSetMacro(YShadows, vtkTypeBool); vtkGetMacro(YShadows, vtkTypeBool); vtkBooleanMacro(YShadows, vtkTypeBool); ///@} ///@{ /** * Turn on/off the wireframe z-shadows. */ vtkSetMacro(ZShadows, vtkTypeBool); vtkGetMacro(ZShadows, vtkTypeBool); vtkBooleanMacro(ZShadows, vtkTypeBool); ///@} ///@{ /** * Enable/disable the translation mode. If on, changes in cursor position * cause the entire widget to translate along with the cursor. * By default, translation mode is off. 
*/ vtkSetMacro(TranslationMode, vtkTypeBool); vtkGetMacro(TranslationMode, vtkTypeBool); vtkBooleanMacro(TranslationMode, vtkTypeBool); ///@} ///@{ /** * Turn on/off cursor wrapping. If the cursor focus moves outside the * specified bounds, the cursor will either be restrained against the * nearest "wall" (Wrap=off), or it will wrap around (Wrap=on). */ vtkSetMacro(Wrap, vtkTypeBool); vtkGetMacro(Wrap, vtkTypeBool); vtkBooleanMacro(Wrap, vtkTypeBool); ///@} /** * Get the focus for this filter. */ vtkPolyData* GetFocus() { return this->Focus; } ///@{ /** * Turn every part of the 3D cursor on or off. */ void AllOn(); void AllOff(); ///@} protected: vtkCursor3D(); ~vtkCursor3D() override; int RequestData(vtkInformation*, vtkInformationVector**, vtkInformationVector*) override; vtkPolyData* Focus; double ModelBounds[6]; double FocalPoint[3]; vtkTypeBool Outline; vtkTypeBool Axes; vtkTypeBool XShadows; vtkTypeBool YShadows; vtkTypeBool ZShadows; vtkTypeBool TranslationMode; vtkTypeBool Wrap; private: vtkCursor3D(const vtkCursor3D&) = delete; void operator=(const vtkCursor3D&) = delete; }; #endif
token_count: 1,760
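The vtkCursor3D sample above only declares the filter's interface. As an illustration of how such a source is typically wired into a pipeline, here is a minimal, hedged C++ sketch; vtkNew, vtkPolyDataMapper and vtkActor are standard VTK classes assumed to be available, and the bounds/focal-point values are arbitrary.

#include <vtkActor.h>
#include <vtkCursor3D.h>
#include <vtkNew.h>
#include <vtkPolyDataMapper.h>

int main()
{
  vtkNew<vtkCursor3D> cursor;
  cursor->SetModelBounds(-10.0, 10.0, -10.0, 10.0, -10.0, 10.0); // wireframe bounding box
  cursor->SetFocalPoint(1.0, 2.0, 3.0);                          // where the three axes meet
  cursor->AllOn();                                               // outline, axes and shadows

  // First output is the cursor geometry; GetFocus() returns the single focal point.
  vtkNew<vtkPolyDataMapper> mapper;
  mapper->SetInputConnection(cursor->GetOutputPort());

  vtkNew<vtkActor> actor;
  actor->SetMapper(mapper);
  return 0;
}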
max_stars_count: 1,337
// Copyright (c) 2020 <NAME> // // This software is provided 'as-is', without any express or implied // warranty. In no event will the authors be held liable for any damages // arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely, subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; you must not // claim that you wrote the original software. If you use this software // in a product, an acknowledgement in the product documentation would be // appreciated but is not required. // 2. Altered source versions must be plainly marked as such, and must not be // misrepresented as being the original software. // 3. This notice may not be removed or altered from any source distribution. #include "TaskScheduler.h" #include "Timer.h" #include <stdio.h> #include <inttypes.h> #include <assert.h> #include <vector> #include <algorithm> #ifndef _WIN32 #include <string.h> #endif using namespace enki; TaskScheduler g_TS; uint32_t g_numTestsRun = 0; uint32_t g_numTestsSucceeded = 0; void RunTestFunction( const char* pTestFuncName_, std::function<bool ()> TestFunc ) { ++g_numTestsRun; fprintf(stdout, "\nRunning: Test %2u: %s...\n", g_numTestsRun, pTestFuncName_ ); bool bSuccess = TestFunc(); if( bSuccess ) { fprintf(stdout, "SUCCESS: Test %2u: %s.\n", g_numTestsRun, pTestFuncName_ ); ++g_numTestsSucceeded; } else { fprintf(stderr, "FAILURE: Test %2u: %s.\n", g_numTestsRun, pTestFuncName_ ); } } struct ParallelSumTaskSet : ITaskSet { struct Count { // prevent false sharing. uint64_t count; char cacheline[64]; }; Count* m_pPartialSums; uint32_t m_NumPartialSums; ParallelSumTaskSet( uint32_t size_ ) : m_pPartialSums(NULL), m_NumPartialSums(0) { m_SetSize = size_; } virtual ~ParallelSumTaskSet() { delete[] m_pPartialSums; } void Init( uint32_t numPartialSums_ ) { delete[] m_pPartialSums; m_NumPartialSums =numPartialSums_ ; m_pPartialSums = new Count[ m_NumPartialSums ]; memset( m_pPartialSums, 0, sizeof(Count)*m_NumPartialSums ); } void ExecuteRange( TaskSetPartition range_, uint32_t threadnum_ ) override { assert( m_pPartialSums && m_NumPartialSums ); uint64_t sum = m_pPartialSums[threadnum_].count; for( uint64_t i = range_.start; i < range_.end; ++i ) { sum += i + 1; } m_pPartialSums[threadnum_].count = sum; } }; struct ParallelReductionSumTaskSet : ITaskSet { ParallelSumTaskSet* m_pParallelSum; Dependency m_Dependency; uint64_t m_FinalSum; ParallelReductionSumTaskSet( ParallelSumTaskSet* pParallelSum_ ) : m_pParallelSum( pParallelSum_ ), m_Dependency( pParallelSum_, this ), m_FinalSum(0) { } void ExecuteRange( TaskSetPartition range, uint32_t threadnum ) override { for( uint32_t i = 0; i < m_pParallelSum->m_NumPartialSums; ++i ) { m_FinalSum += m_pParallelSum->m_pPartialSums[i].count; } } }; void threadFunction( uint32_t setSize_, bool* pbRegistered_, uint64_t* pSumParallel_ ) { *pbRegistered_ = g_TS.RegisterExternalTaskThread(); if( *pbRegistered_ ) { ParallelSumTaskSet parallelSumTask( setSize_ ); parallelSumTask.Init( g_TS.GetNumTaskThreads() ); ParallelReductionSumTaskSet parallelReductionSumTaskSet( &parallelSumTask ); g_TS.AddTaskSetToPipe( &parallelSumTask ); g_TS.WaitforTask( &parallelReductionSumTaskSet ); g_TS.DeRegisterExternalTaskThread(); *pSumParallel_ = parallelReductionSumTaskSet.m_FinalSum; } } struct PinnedTask : IPinnedTask { PinnedTask() : IPinnedTask( enki::GetNumHardwareThreads() - 1 ) // set 
pinned thread to 0 {} virtual void Execute() { threadRunOn = g_TS.GetThreadNum(); } uint32_t threadRunOn = 0; }; struct TestPriorities : ITaskSet { void ExecuteRange( TaskSetPartition range_, uint32_t threadnum_ ) override { } }; struct CustomAllocData { const char* domainName; uint64_t totalAllocations; }; void* CustomAllocFunc( size_t align_, size_t size_, void* userData_, const char* file_, int line_ ) { CustomAllocData* data = (CustomAllocData*)userData_; data->totalAllocations += size_; return DefaultAllocFunc( align_, size_, userData_, file_, line_ ); }; void CustomFreeFunc( void* ptr_, size_t size_, void* userData_, const char* file_, int line_ ) { CustomAllocData* data = (CustomAllocData*)userData_; data->totalAllocations -= size_; DefaultFreeFunc( ptr_, size_, userData_, file_, line_ ); }; std::atomic<int32_t> gs_DependencyCounter = {0}; struct TestDependenciesTaskSet : ITaskSet { int32_t m_Counter = 0; std::vector<Dependency> m_Dependencies; void ExecuteRange( TaskSetPartition range_, uint32_t threadnum_ ) override { m_Counter = gs_DependencyCounter.fetch_add(1); } }; struct TestDependenciesPinnedTask : IPinnedTask { int32_t m_Counter = 0; std::vector<Dependency> m_Dependencies; void Execute() override { m_Counter = gs_DependencyCounter.fetch_add(1); } }; struct TestDependenciesCompletable : ICompletable { std::vector<Dependency> m_Dependencies; }; int main(int argc, const char * argv[]) { fprintf( stdout,"\n---Running Tests----\n" ); enki::TaskSchedulerConfig baseConfig; fprintf( stdout,"System has %u hardware threads reported\n", baseConfig.numTaskThreadsToCreate + 1 ); if( 0 == baseConfig.numTaskThreadsToCreate ) { baseConfig.numTaskThreadsToCreate = 1; fprintf( stdout,"As only one hardware thread forcing enkiTS to use 2 threads\n"); } uint32_t setSize = 20 * 1024 * 1024; uint64_t sumSerial; // evaluate serial for test comparison with parallel runs ParallelSumTaskSet serialTask( setSize ); serialTask.Init( 1 ); TaskSetPartition range = { 0, setSize }; serialTask.ExecuteRange( range, 0 ); sumSerial = serialTask.m_pPartialSums[0].count; RunTestFunction( "Test Lots of TaskSets", [&]()->bool { g_TS.Initialize( baseConfig ); static constexpr uint32_t TASK_RANGE = 65*65; static constexpr uint32_t TASK_COUNT = 50; struct TaskSet : public enki::ITaskSet { TaskSet() : enki::ITaskSet(TASK_RANGE) {}; virtual void ExecuteRange( TaskSetPartition range_, uint32_t threadnum_ ) override { if( range_.start >= TASK_RANGE && range_.end > TASK_RANGE ) { countErrors.fetch_add(1); } } std::atomic<int32_t> countErrors{ 0 }; }; TaskSet tasks[TASK_COUNT]; for( uint32_t i = 0; i < TASK_COUNT; ++i ) { g_TS.AddTaskSetToPipe( &tasks[i] ); } g_TS.WaitforAll(); bool bSuccess = true; for( uint32_t i = 0; i < TASK_COUNT; ++i ) { if( tasks[i].countErrors.load( std::memory_order_relaxed ) > 0 ) { bSuccess = false; break; } } return bSuccess; } ); RunTestFunction( "Parallel Reduction Sum", [&]()->bool { g_TS.Initialize( baseConfig ); ParallelSumTaskSet parallelSumTask( setSize ); parallelSumTask.Init( g_TS.GetNumTaskThreads() ); ParallelReductionSumTaskSet parallelReductionSumTaskSet( &parallelSumTask ); g_TS.AddTaskSetToPipe( &parallelSumTask ); g_TS.WaitforTask( &parallelReductionSumTaskSet ); fprintf( stdout,"\tParallelReductionSum: %" PRIu64 ", sumSerial: %" PRIu64 "\n", parallelReductionSumTaskSet.m_FinalSum, sumSerial ); return parallelReductionSumTaskSet.m_FinalSum == sumSerial; } ); RunTestFunction( "External Thread", [&]()->bool { enki::TaskSchedulerConfig config = baseConfig; 
config.numExternalTaskThreads = 1; bool bRegistered = false; uint64_t sumParallel = 0; g_TS.Initialize( config ); std::thread threads( threadFunction, setSize, &bRegistered, &sumParallel ); threads.join(); fprintf( stdout,"\tExternal thread sum: %" PRIu64 ", sumSerial: %" PRIu64 "\n", sumParallel, sumSerial ); if( !bRegistered ) { fprintf( stderr,"\tExternal thread did not register\n" ); return false; } if( sumParallel != sumSerial ) { return false; } return true; } ); RunTestFunction( "Pinned Task", [&]()->bool { g_TS.Initialize( baseConfig ); PinnedTask pinnedTask; g_TS.AddPinnedTask( &pinnedTask ); g_TS.WaitforTask( &pinnedTask ); fprintf( stdout,"\tPinned task ran on thread %u, requested thread %u\n", pinnedTask.threadRunOn, pinnedTask.threadNum ); return pinnedTask.threadRunOn == pinnedTask.threadNum; } ); RunTestFunction( "Priorities", [&]()->bool { // check priorities run in order by forcing single threaded execution enki::TaskSchedulerConfig config = baseConfig; config.numTaskThreadsToCreate = 0; g_TS.Initialize( config ); TestPriorities priorityTaskLow; priorityTaskLow.m_Priority = enki::TASK_PRIORITY_LOW; TestPriorities priorityTaskHigh; priorityTaskHigh.m_Priority = enki::TASK_PRIORITY_HIGH; g_TS.AddTaskSetToPipe( &priorityTaskLow ); g_TS.AddTaskSetToPipe( &priorityTaskHigh ); g_TS.WaitforTask( &priorityTaskHigh, priorityTaskHigh.m_Priority ); // WaitforTask should not have been run any task below high priority, // even though low priority task was added first if( priorityTaskLow.GetIsComplete() ) { return false; } g_TS.WaitforTask( &priorityTaskLow ); return true; } ); RunTestFunction( "Custom Allocator", [&]()->bool { enki::TaskSchedulerConfig config = baseConfig; config.customAllocator.alloc = CustomAllocFunc; config.customAllocator.free = CustomFreeFunc; CustomAllocData customAllocdata{ "enkITS", 0 }; config.customAllocator.userData = &customAllocdata; g_TS.Initialize( config ); uint64_t allocsAfterInit = customAllocdata.totalAllocations; fprintf( stdout,"\tenkiTS allocated bytes after init: %" PRIu64 "\n", customAllocdata.totalAllocations ); ParallelSumTaskSet parallelSumTask( setSize ); parallelSumTask.Init( g_TS.GetNumTaskThreads() ); ParallelReductionSumTaskSet parallelReductionSumTaskSet( &parallelSumTask ); g_TS.AddTaskSetToPipe( &parallelSumTask ); g_TS.WaitforTask( &parallelReductionSumTaskSet ); fprintf( stdout,"\tenkiTS allocated bytes after running tasks: %" PRIu64 "\n", customAllocdata.totalAllocations ); if( customAllocdata.totalAllocations != allocsAfterInit ) { fprintf( stderr,"\tERROR: enkiTS allocated bytes during scheduling\n" ); return false; } g_TS.WaitforAllAndShutdown(); fprintf( stdout,"\tenkiTS allocated bytes after shutdown: %" PRIu64 "\n", customAllocdata.totalAllocations ); return customAllocdata.totalAllocations == 0; } ); RunTestFunction( "Dependencies", [&]()->bool { g_TS.Initialize( baseConfig ); TestDependenciesTaskSet taskSetA; TestDependenciesTaskSet taskSetBs[8]; for( auto& task : taskSetBs ) { task.SetDependenciesVec(task.m_Dependencies,{&taskSetA}); } TestDependenciesPinnedTask pinnedTaskC; pinnedTaskC.SetDependenciesVec(pinnedTaskC.m_Dependencies, taskSetBs); TestDependenciesTaskSet taskSetDs[8]; for( auto& task : taskSetDs ) { task.SetDependenciesVec(task.m_Dependencies,{&pinnedTaskC}); } TestDependenciesTaskSet taskSetEs[4]; for( auto& task : taskSetEs ) { task.SetDependenciesVec(task.m_Dependencies,taskSetDs); } TestDependenciesCompletable finalTask; finalTask.SetDependenciesVec( finalTask.m_Dependencies,taskSetEs); 
g_TS.AddTaskSetToPipe( &taskSetA ); g_TS.WaitforTask( &finalTask ); // check counters int32_t lastCount = taskSetA.m_Counter; int32_t countCheck = lastCount; for( auto& task : taskSetBs ) { if( task.m_Counter < countCheck ) { fprintf( stderr,"\tERROR: enkiTS dependencies issue %d < %d at line %d\n", task.m_Counter, lastCount, __LINE__ ); return false; } lastCount = std::max( lastCount, task.m_Counter ); } countCheck = lastCount; if( pinnedTaskC.m_Counter < countCheck ) { fprintf( stderr,"\tERROR: enkiTS dependencies issue %d < %d at line %d\n", pinnedTaskC.m_Counter, lastCount, __LINE__ ); return false; lastCount = std::max( lastCount, pinnedTaskC.m_Counter ); } countCheck = lastCount; for( auto& task : taskSetDs ) { if( task.m_Counter < countCheck ) { fprintf( stderr,"\tERROR: enkiTS dependencies issue %d < %d at line %d\n", task.m_Counter, lastCount, __LINE__ ); return false; } lastCount = std::max( lastCount, task.m_Counter ); } countCheck = lastCount; for( auto& task : taskSetEs ) { if( task.m_Counter < countCheck ) { fprintf( stderr,"\tERROR: enkiTS dependencies issue %d < %d at line %d\n", task.m_Counter, lastCount, __LINE__ ); return false; } lastCount = std::max( lastCount, task.m_Counter ); } g_TS.WaitforAllAndShutdown(); return true; } ); RunTestFunction( "WaitForNewPinnedTasks", [&]()->bool { enki::TaskSchedulerConfig config = baseConfig; config.numTaskThreadsToCreate += 1; g_TS.Initialize( config ); const uint32_t PINNED_ONLY_THREAD = g_TS.GetNumTaskThreads() - 1; LambdaPinnedTask waitTask( PINNED_ONLY_THREAD, []() { while( g_TS.GetIsWaitforAllCalled() ) { g_TS.WaitForNewPinnedTasks(); g_TS.RunPinnedTasks(); } } ); g_TS.AddPinnedTask( &waitTask ); PinnedTask pinnedTask; pinnedTask.threadNum = PINNED_ONLY_THREAD; g_TS.AddPinnedTask( &pinnedTask ); g_TS.WaitforTask( &pinnedTask ); fprintf( stdout,"\tPinned task ran on thread %u, requested thread %u\n", pinnedTask.threadRunOn, pinnedTask.threadNum ); if( pinnedTask.threadRunOn != pinnedTask.threadNum ) { return false; } g_TS.WaitforAll(); // force all tasks to end, waitTask should exit because we use GetIsWaitforAllCalled() g_TS.AddPinnedTask( &waitTask ); g_TS.AddPinnedTask( &pinnedTask ); g_TS.WaitforTask( &pinnedTask ); fprintf( stdout,"\tPinned task ran on thread %u, requested thread %u\n", pinnedTask.threadRunOn, pinnedTask.threadNum ); g_TS.WaitforAllAndShutdown(); return pinnedTask.threadRunOn == pinnedTask.threadNum; } ); fprintf( stdout, "\n%u Tests Run\n%u Tests Succeeded\n\n", g_numTestsRun, g_numTestsSucceeded ); if( g_numTestsRun == g_numTestsSucceeded ) { fprintf( stdout, "All tests SUCCEEDED\n" ); } else { fprintf( stderr, "%u tests FAILED\n", g_numTestsRun - g_numTestsSucceeded ); return 1; } return 0; }
token_count: 8,158
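The enkiTS test file above exercises many features at once; the core pattern it relies on, using only the API visible in the sample (TaskScheduler, ITaskSet, AddTaskSetToPipe, WaitforTask), reduces to a short sketch like the following.

#include "TaskScheduler.h"
#include <cstdio>

struct HelloTask : enki::ITaskSet
{
    HelloTask() : enki::ITaskSet( 100 ) {} // a set of 100 iterations
    void ExecuteRange( enki::TaskSetPartition range_, uint32_t threadnum_ ) override
    {
        // Each call handles [range_.start, range_.end) on some worker thread.
        printf( "range [%u,%u) on thread %u\n", range_.start, range_.end, threadnum_ );
    }
};

int main()
{
    enki::TaskScheduler ts;
    ts.Initialize();              // create worker threads
    HelloTask task;
    ts.AddTaskSetToPipe( &task ); // schedule the task set
    ts.WaitforTask( &task );      // block until it has completed
    ts.WaitforAllAndShutdown();
    return 0;
}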
max_stars_count: 1,738
/* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ #include "EditorUI_QT_Precompiled.h" #include "./ContextMenu.h" #include <ContextMenu.moc> #include <QPainter> #include <QStyle> #include <QStyleOption> #include <QEvent> #include <QGraphicsDropShadowEffect> #include <QMenuBar> #include <QDebug> ContextMenu::ContextMenu(QWidget* p) : QMenu(nullptr) { Initialize(); // do not set the parent, otherwise we will get the broken stylesheet // but delete with it // this behaviour can be changed again, once ParticleEditor is restyled connect(p, &QObject::destroyed, this, &QObject::deleteLater); } ContextMenu::ContextMenu(const QString& title, QWidget* parent) : QMenu(title, nullptr) { Initialize(); // do not set the parent, otherwise we will get the broken stylesheet // but delete with it // this behaviour can be changed again, once ParticleEditor is restyled connect(parent, &QObject::destroyed, this, &QObject::deleteLater); } void ContextMenu::Initialize() { setWindowFlags(windowFlags() | Qt::FramelessWindowHint); setAttribute(Qt::WA_TranslucentBackground, true); QGraphicsDropShadowEffect* effect = new QGraphicsDropShadowEffect(); effect->setBlurRadius(5); setGraphicsEffect(effect); }
token_count: 541
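The ContextMenu sample above is a thin QMenu subclass that ties its lifetime to the widget passed in rather than taking it as a Qt parent. A hedged usage sketch, assuming it is driven like any ordinary QMenu (addAction and exec are standard QMenu API, not part of the sample):

#include <QCursor>
#include <QObject>
#include <QWidget>
#include "ContextMenu.h"

void showEditorMenu(QWidget* editorWidget)
{
    // Heap-allocated: the menu deletes itself when editorWidget is destroyed
    // (see the connect() to QObject::deleteLater in the constructor above).
    ContextMenu* menu = new ContextMenu(editorWidget);
    menu->addAction(QObject::tr("Copy"));
    menu->addAction(QObject::tr("Paste"));
    menu->exec(QCursor::pos()); // modal popup at the current mouse position
}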
max_stars_count: 1,433
/****************************************************************** * * Copyright 2014 Samsung Electronics All Rights Reserved. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************/ #include <bluetooth.h> #include <bluetooth_type.h> #include <bluetooth_product.h> #include "cableserver.h" #include <pthread.h> #include "cacommon.h" #include "caadapterutils.h" #include <gio/gio.h> #include "camutex.h" #include "caqueueingthread.h" #include "caadapterutils.h" #include "cafragmentation.h" #include "cagattservice.h" #include "cableutil.h" #include "oic_string.h" #include "oic_malloc.h" /** * @def TZ_BLE_SERVER_TAG * @brief Logging tag for module name */ #define TZ_BLE_SERVER_TAG "TZ_BLE_GATT_SERVER" /** * @def CA_BLE_INITIAL_BUF_SIZE * @brief Initial buffer size for Gatt Server. */ #define CA_BLE_INITIAL_BUF_SIZE 512 /** * @var g_gattSvcPath * @brief attribute handler for OIC server attribute. */ static char *g_gattSvcPath = NULL; /** * @var g_gattReadCharPath * @brief attribute handler for readCharacteristic of OIC server */ static char *g_gattReadCharPath = NULL; /** * @var g_gattWriteCharPath * @brief attribute handler for writeCharacteristic of OIC server */ static char *g_gattWriteCharPath = NULL; /** * @var g_hAdvertiser * @brief handler for OIC advertiser. */ static bt_advertiser_h g_hAdvertiser = NULL; /** * @var g_bleServerDataReceivedCallback * @brief Maintains the callback to be notified on receival of network packets from other * BLE devices */ static CABLEDataReceivedCallback g_bleServerDataReceivedCallback = NULL; /** * @var g_serverErrorCallback * @brief callback to update the error to le adapter */ static CABLEErrorHandleCallback g_serverErrorCallback; /** * @var g_isBleGattServerStarted * @brief Boolean variable to keep the state of the GATTServer */ static bool g_isBleGattServerStarted = false; /** * @var g_bleServerStateMutex * @brief Mutex to synchronize the calls to be done to the platform from GATTServer * interfaces from different threads. */ static ca_mutex g_bleServerStateMutex = NULL; /** * @var g_bleCharacteristicMutex * @brief Mutex to synchronize writing operations on the characteristics. */ static ca_mutex g_bleCharacteristicMutex = NULL; /** * @var g_bleServiceMutex * @brief Mutex to synchronize to create the OIC service.. */ static ca_mutex g_bleServiceMutex = NULL; /** * @var g_bleReqRespCbMutex * @brief Mutex to synchronize access to the requestResponse callback to be called * when the data needs to be sent from GATTClient. */ static ca_mutex g_bleReqRespCbMutex = NULL; /** * @var g_bleServerThreadPoolMutex * @brief Mutex to synchronize the task to be pushed to thread pool. */ static ca_mutex g_bleServerThreadPoolMutex = NULL; /** * @var g_eventLoop * @brief gmainLoop to manage the threads to receive the callback from the platfrom. 
*/ static GMainLoop *g_eventLoop = NULL; /** * @var g_bleServerThreadPool * @brief reference to threadpool */ static ca_thread_pool_t g_bleServerThreadPool = NULL; void CABleGattServerConnectionStateChangedCb(int result, bool connected, const char *remoteAddress, void *userData) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "CABleGattConnectionStateChangedCb result[%d]", result); VERIFY_NON_NULL_VOID(remoteAddress, TZ_BLE_SERVER_TAG, "remote address is NULL"); if (connected) { OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "Connected to [%s]", remoteAddress); } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } CAResult_t CAStartLEGattServer() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); CAResult_t ret = CAInitGattServerMutexVariables(); if (CA_STATUS_OK != ret ) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "CAInitGattServerMutexVariables failed!"); CATerminateGattServerMutexVariables(); return CA_SERVER_NOT_STARTED; } ca_mutex_lock(g_bleServerThreadPoolMutex); if (NULL == g_bleServerThreadPool) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "g_bleServerThreadPool is NULL"); ca_mutex_unlock(g_bleServerThreadPoolMutex); return CA_STATUS_FAILED; } ret = ca_thread_pool_add_task(g_bleServerThreadPool, CAStartBleGattServerThread, NULL); if (CA_STATUS_OK != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "ca_thread_pool_add_task failed with ret [%d]", ret); ca_mutex_unlock(g_bleServerThreadPoolMutex); return CA_STATUS_FAILED; } ca_mutex_unlock(g_bleServerThreadPoolMutex); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } void CAStartBleGattServerThread(void *data) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); ca_mutex_lock(g_bleServerStateMutex); if (true == g_isBleGattServerStarted) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "Gatt Server is already running"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } CAResult_t ret = CAInitBleGattService(); if (CA_STATUS_OK != ret ) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "_bt_gatt_init_service failed"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } sleep(5); // Sleep is must because of the platform issue. char *serviceUUID = CA_GATT_SERVICE_UUID; ret = CAAddNewBleServiceInGattServer(serviceUUID); if (CA_STATUS_OK != ret ) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "CAAddNewBleServiceInGattServer failed"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } static const char charReadUUID[] = CA_GATT_RESPONSE_CHRC_UUID; uint8_t charReadValue[] = {33, 44, 55, 66}; // These are initial random values ret = CAAddNewCharacteristicsToGattServer(g_gattSvcPath, charReadUUID, charReadValue, CA_BLE_INITIAL_BUF_SIZE, true); // For Read Characteristics. if (CA_STATUS_OK != ret ) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "CAAddNewCharacteristicsToGattServer failed"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } static const char charWriteUUID[] = CA_GATT_REQUEST_CHRC_UUID; uint8_t charWriteValue[] = {33, 44, 55, 66}; // These are initial random values ret = CAAddNewCharacteristicsToGattServer(g_gattSvcPath, charWriteUUID, charWriteValue, CA_BLE_INITIAL_BUF_SIZE, false); // For Write Characteristics. 
if (CA_STATUS_OK != ret ) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "CAAddNewCharacteristicsToGattServer failed"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } ret = CARegisterBleServicewithGattServer(g_gattSvcPath); if (CA_STATUS_OK != ret ) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "CARegisterBleServicewithGattServer failed"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } int res = bt_gatt_set_connection_state_changed_cb(CABleGattServerConnectionStateChangedCb, NULL); if (BT_ERROR_NONE != res) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_set_connection_state_changed_cb Failed with return as [%s]", CABTGetErrorMsg(res)); return; } bt_adapter_le_create_advertiser(&g_hAdvertiser); if (NULL == g_hAdvertiser) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "g_hAdvertiser is NULL"); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } res = bt_adapter_le_start_advertising(g_hAdvertiser, NULL, NULL, NULL); if (BT_ERROR_NONE != res) { OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "bt_adapter_le_start_advertising failed with ret [%d] ", res); ca_mutex_unlock(g_bleServerStateMutex); CATerminateLEGattServer(); return; } g_isBleGattServerStarted = true; ca_mutex_unlock(g_bleServerStateMutex); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "LE Server initialization complete."); GMainContext *thread_context = NULL; thread_context = g_main_context_new(); g_eventLoop = g_main_loop_new(thread_context, FALSE); g_main_context_push_thread_default(thread_context); g_main_loop_run(g_eventLoop); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } CAResult_t CAStopLEGattServer() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); ca_mutex_lock(g_bleServerStateMutex); if (false == g_isBleGattServerStarted) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "Gatt Server is not running to stop"); ca_mutex_unlock(g_bleServerStateMutex); return CA_STATUS_OK; } g_isBleGattServerStarted = false; if (NULL != g_hAdvertiser ) { int ret = 0; ret = bt_adapter_le_stop_advertising(g_hAdvertiser); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_adapter_le_stop_advertising failed with ret [%d]", ret); } ret = bt_adapter_le_destroy_advertiser(g_hAdvertiser); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_adapter_le_destroy_advertiser failed with ret [%d]", ret); } g_hAdvertiser = NULL; } CAResult_t res = CARemoveAllBleServicesFromGattServer(); if (CA_STATUS_OK != res) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "removeAllBleServicesFromGattServer failed"); } res = CADeInitBleGattService(); if (CA_STATUS_OK != res) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "_bt_gatt_deinit_service failed with ret [%d]", res); } GMainContext *context_event_loop = NULL; // Required for waking up the thread which is running in gmain loop if (NULL != g_eventLoop) { context_event_loop = g_main_loop_get_context(g_eventLoop); if (context_event_loop) { OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "g_eventLoop context %x", context_event_loop); g_main_context_wakeup(context_event_loop); // Kill g main loops and kill threads g_main_loop_quit(g_eventLoop); } } else { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "g_eventLoop context is NULL"); } ca_mutex_unlock(g_bleServerStateMutex); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } void CATerminateLEGattServer() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); ca_mutex_lock(g_bleServerStateMutex); // free service Path(unique identifier for ble service) ca_mutex_lock(g_bleServiceMutex); OICFree(g_gattSvcPath); g_gattSvcPath = NULL; ca_mutex_unlock(g_bleServiceMutex); // freeing characteristics 
ca_mutex_lock(g_bleCharacteristicMutex); OICFree(g_gattReadCharPath); g_gattReadCharPath = NULL; OICFree(g_gattWriteCharPath); g_gattWriteCharPath = NULL; ca_mutex_unlock(g_bleCharacteristicMutex); ca_mutex_unlock(g_bleServerStateMutex); // Terminating all mutex variables. CATerminateGattServerMutexVariables(); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } CAResult_t CAInitGattServerMutexVariables() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); if (NULL == g_bleServerStateMutex) { g_bleServerStateMutex = ca_mutex_new(); if (NULL == g_bleServerStateMutex) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "ca_mutex_new failed"); return CA_STATUS_FAILED; } } if (NULL == g_bleServiceMutex) { g_bleServiceMutex = ca_mutex_new(); if (NULL == g_bleServiceMutex) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "ca_mutex_new failed"); return CA_STATUS_FAILED; } } if (NULL == g_bleCharacteristicMutex) { g_bleCharacteristicMutex = ca_mutex_new(); if (NULL == g_bleCharacteristicMutex) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "ca_mutex_new failed"); return CA_STATUS_FAILED; } } if (NULL == g_bleReqRespCbMutex) { g_bleReqRespCbMutex = ca_mutex_new(); if (NULL == g_bleReqRespCbMutex) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "ca_mutex_new failed"); return CA_STATUS_FAILED; } } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } void CATerminateGattServerMutexVariables() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); ca_mutex_free(g_bleServerStateMutex); g_bleServerStateMutex = NULL; g_bleServerStateMutex = NULL; ca_mutex_free(g_bleServiceMutex); g_bleServiceMutex = NULL; ca_mutex_free(g_bleCharacteristicMutex); g_bleCharacteristicMutex = NULL; ca_mutex_free(g_bleReqRespCbMutex); g_bleReqRespCbMutex = NULL; OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } CAResult_t CAInitBleGattService() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); int ret = _bt_gatt_init_service(); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "_bt_gatt_deinit_service failed with ret [%d]", ret); return CA_STATUS_FAILED; } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } CAResult_t CADeInitBleGattService() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); int ret = _bt_gatt_deinit_service(); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "_bt_gatt_deinit_service failed with ret [%d]", ret); return CA_STATUS_FAILED; } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } void CASetLEServerThreadPoolHandle(ca_thread_pool_t handle) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); ca_mutex_lock(g_bleServerThreadPoolMutex); g_bleServerThreadPool = handle; ca_mutex_unlock(g_bleServerThreadPoolMutex); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } CAResult_t CAAddNewBleServiceInGattServer(const char *serviceUUID) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); VERIFY_NON_NULL(serviceUUID, TZ_BLE_SERVER_TAG, "Param serviceUUID is NULL"); OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "service uuid %s", serviceUUID); char *svcPath = NULL; int ret = bt_gatt_add_service(serviceUUID, &svcPath); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_add_service failed with ret [%d]", ret); return CA_STATUS_FAILED; } if (NULL != svcPath) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "AddNewBleServiceInGattServer ServicePath obtained is %s", svcPath); ca_mutex_lock(g_bleServiceMutex); if (NULL != g_gattSvcPath) { OICFree(g_gattSvcPath); g_gattSvcPath = NULL; } g_gattSvcPath = svcPath; ca_mutex_unlock(g_bleServiceMutex); } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } CAResult_t CARemoveBleServiceFromGattServer(const char *svcPath) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, 
"IN"); VERIFY_NON_NULL(svcPath, TZ_BLE_SERVER_TAG, "Param svcPath is NULL"); int ret = bt_gatt_remove_service(svcPath); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_remove_service failed [%d]", ret); return CA_STATUS_FAILED; } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } CAResult_t CARemoveAllBleServicesFromGattServer() { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); int ret = bt_gatt_delete_services(); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_delete_services failed with ret [%d]", ret); return CA_STATUS_FAILED; } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } void CABleGattRemoteCharacteristicWriteCb(char *charPath, unsigned char *charValue, int charValueLen, const char *remoteAddress, void *userData) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); if (NULL == charPath || NULL == charValue || NULL == remoteAddress) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "Param callback values are NULL"); return; } OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "charPath = [%s] charValue = [%p] len [%d]", charPath, charValue, charValueLen); uint8_t *data = OICMalloc(charValueLen); if (NULL == data) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "Malloc failed!"); return; } memcpy(data, charValue, charValueLen); ca_mutex_lock(g_bleReqRespCbMutex); if (NULL == g_bleServerDataReceivedCallback) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "gReqRespCallback is NULL!"); ca_mutex_unlock(g_bleReqRespCbMutex); OICFree(data); return; } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "Sending data up !"); uint32_t sentLength = 0; g_bleServerDataReceivedCallback(remoteAddress, data, charValueLen, &sentLength); ca_mutex_unlock(g_bleReqRespCbMutex); OICFree(data); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } CAResult_t CARegisterBleServicewithGattServer(const char *svcPath) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); VERIFY_NON_NULL(svcPath, TZ_BLE_SERVER_TAG, "Param svcPath is NULL"); OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "svcPath:%s", svcPath); int ret = bt_gatt_register_service(svcPath, CABleGattRemoteCharacteristicWriteCb, NULL); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_register_service failed with ret [%d]", ret); return CA_STATUS_FAILED; } OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } CAResult_t CAAddNewCharacteristicsToGattServer(const char *svcPath, const char *charUUID, const uint8_t *charValue, int charValueLen, bool read) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); const char *charFlags[1]; if(read) { charFlags[0] = "notify"; } else { charFlags[0] = "write-without-response"; } size_t flagLen = sizeof(charFlags) / sizeof(charFlags[0]); char *charPath = NULL; int ret = bt_gatt_add_characteristic(charUUID, (const char *) charValue, charValueLen, charFlags, flagLen, svcPath, &charPath); if (0 != ret || NULL == charPath) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_add_characteristic failed with ret [%d]", ret); return CA_STATUS_FAILED; } OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "bt_gatt_add_characteristic charPath obtained: %s", charPath); ca_mutex_lock(g_bleCharacteristicMutex); if (read) { if (NULL != g_gattReadCharPath) { OICFree(g_gattReadCharPath); g_gattReadCharPath = NULL; } g_gattReadCharPath = charPath; } else { if (NULL != g_gattWriteCharPath) { OICFree(g_gattWriteCharPath); g_gattWriteCharPath = NULL; } g_gattWriteCharPath = charPath; } ca_mutex_unlock(g_bleCharacteristicMutex); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } CAResult_t CARemoveCharacteristicsFromGattServer(const char *charPath) { ///TODO: There is no api provided in 
bluetooth.h for removing characteristics. return CA_STATUS_OK; } CAResult_t CAUpdateCharacteristicsToGattClient(const char *address, const uint8_t *charValue, uint32_t charValueLen) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); VERIFY_NON_NULL(charValue, TZ_BLE_SERVER_TAG, "Param charValue is NULL"); VERIFY_NON_NULL(address, TZ_BLE_SERVER_TAG, "Param address is NULL"); OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "Client's Unicast address for sending data [%s]", address); ca_mutex_lock(g_bleCharacteristicMutex); if (NULL == g_gattReadCharPath) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "g_gattReadCharPath is NULL"); ca_mutex_unlock(g_bleCharacteristicMutex); return CA_STATUS_FAILED; } char *data = OICCalloc(charValueLen, 1); if (NULL == data) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "malloc failed!"); ca_mutex_unlock(g_bleCharacteristicMutex); return CA_STATUS_FAILED; } memcpy(data, charValue, charValueLen); // Binary data OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "updating characteristics char [%s] data [%p] dataLen [%u]", (const char *)g_gattReadCharPath, data, charValueLen); int ret = bt_gatt_update_characteristic(g_gattReadCharPath, data, charValueLen, address); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_update_characteristic failed with return [%d]", ret); OICFree(data); ca_mutex_unlock(g_bleCharacteristicMutex); return CA_STATUS_FAILED; } OICFree(data); ca_mutex_unlock(g_bleCharacteristicMutex); OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } CAResult_t CAUpdateCharacteristicsToAllGattClients(const uint8_t *charValue, uint32_t charValueLen) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); VERIFY_NON_NULL(charValue, TZ_BLE_SERVER_TAG, "Param charValue is NULL"); ca_mutex_lock(g_bleCharacteristicMutex); if (NULL == g_gattReadCharPath) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "g_gattReadCharPath is NULL"); ca_mutex_unlock(g_bleCharacteristicMutex); return CA_STATUS_FAILED; } char *data = OICMalloc(charValueLen); if (NULL == data) { OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "malloc failed!"); ca_mutex_unlock(g_bleCharacteristicMutex); return CA_STATUS_FAILED; } memcpy(data, charValue, charValueLen); // Binary data OIC_LOG_V(DEBUG, TZ_BLE_SERVER_TAG, "updating characteristics char [%s] data [%p] dataLen [%u]", (const char *)g_gattReadCharPath, data, charValueLen); int ret = bt_gatt_update_characteristic(g_gattReadCharPath, data, charValueLen, NULL); if (0 != ret) { OIC_LOG_V(ERROR, TZ_BLE_SERVER_TAG, "bt_gatt_update_characteristic failed with return [%d]", ret); OICFree(data); ca_mutex_unlock(g_bleCharacteristicMutex); return CA_STATUS_FAILED; } OICFree(data); ca_mutex_unlock(g_bleCharacteristicMutex); OIC_LOG(ERROR, TZ_BLE_SERVER_TAG, "OUT"); return CA_STATUS_OK; } void CASetLEReqRespServerCallback(CABLEDataReceivedCallback callback) { OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "IN"); ca_mutex_lock(g_bleReqRespCbMutex); g_bleServerDataReceivedCallback = callback; ca_mutex_unlock(g_bleReqRespCbMutex); OIC_LOG(DEBUG, TZ_BLE_SERVER_TAG, "OUT"); } void CASetBLEServerErrorHandleCallback(CABLEErrorHandleCallback callback) { g_serverErrorCallback = callback; }
token_count: 11,484
max_stars_count: 570
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.android.customtransition; import com.example.android.common.logger.Log; import android.content.Context; import android.os.Bundle; import androidx.annotation.NonNull; import androidx.fragment.app.Fragment; import android.transition.Scene; import android.transition.Transition; import android.transition.TransitionManager; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.FrameLayout; public class CustomTransitionFragment extends Fragment implements View.OnClickListener { private static final String STATE_CURRENT_SCENE = "current_scene"; /** Tag for the logger */ private static final String TAG = "CustomTransitionFragment"; /** These are the Scenes we use. */ private Scene[] mScenes; /** The current index for mScenes. */ private int mCurrentScene; /** This is the custom Transition we use in this sample. */ private Transition mTransition; public CustomTransitionFragment() { } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { return inflater.inflate(R.layout.fragment_custom_transition, container, false); } @Override public void onViewCreated(View view, Bundle savedInstanceState) { Context context = getActivity(); FrameLayout container = (FrameLayout) view.findViewById(R.id.container); view.findViewById(R.id.show_next_scene).setOnClickListener(this); if (null != savedInstanceState) { mCurrentScene = savedInstanceState.getInt(STATE_CURRENT_SCENE); } // We set up the Scenes here. mScenes = new Scene[] { Scene.getSceneForLayout(container, R.layout.scene1, context), Scene.getSceneForLayout(container, R.layout.scene2, context), Scene.getSceneForLayout(container, R.layout.scene3, context), }; // This is the custom Transition. mTransition = new ChangeColor(); // Show the initial Scene. TransitionManager.go(mScenes[mCurrentScene % mScenes.length]); } @Override public void onSaveInstanceState(@NonNull Bundle outState) { super.onSaveInstanceState(outState); outState.putInt(STATE_CURRENT_SCENE, mCurrentScene); } @Override public void onClick(View v) { if (v.getId() == R.id.show_next_scene) { mCurrentScene = (mCurrentScene + 1) % mScenes.length; Log.i(TAG, "Transitioning to scene #" + mCurrentScene); // Pass the custom Transition as second argument for TransitionManager.go TransitionManager.go(mScenes[mCurrentScene], mTransition); } } }
token_count: 1,156
max_stars_count: 368
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.google.archivepatcher.generator.similarity;

import com.google.archivepatcher.generator.MinimalZipEntry;

import java.io.File;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

/**
 * Detects identical files on the basis of the CRC32 of uncompressed content. All entries that have
 * the same CRC32 will be identified as similar (and presumably are identical, in the absence of
 * hash collisions).
 */
public class Crc32SimilarityFinder extends SimilarityFinder {

  /**
   * All entries in the base archive, organized by CRC32.
   */
  private final Map<Long, List<MinimalZipEntry>> baseEntriesByCrc32 = new HashMap<>();

  /**
   * Constructs a new similarity finder with the specified parameters.
   * @param baseArchive the base archive that contains the entries to be searched
   * @param baseEntries the entries in the base archive that are eligible to be searched
   */
  public Crc32SimilarityFinder(File baseArchive, Collection<MinimalZipEntry> baseEntries) {
    super(baseArchive, baseEntries);
    for (MinimalZipEntry oldEntry : baseEntries) {
      long crc32 = oldEntry.getCrc32OfUncompressedData();
      List<MinimalZipEntry> entriesForCrc32 = baseEntriesByCrc32.get(crc32);
      if (entriesForCrc32 == null) {
        entriesForCrc32 = new LinkedList<>();
        baseEntriesByCrc32.put(crc32, entriesForCrc32);
      }
      entriesForCrc32.add(oldEntry);
    }
  }

  @Override
  public List<MinimalZipEntry> findSimilarFiles(File newArchive, MinimalZipEntry newEntry) {
    List<MinimalZipEntry> matchedEntries =
        baseEntriesByCrc32.get(newEntry.getCrc32OfUncompressedData());
    if (matchedEntries == null) {
      return Collections.emptyList();
    }
    return Collections.unmodifiableList(matchedEntries);
  }
}
token_count: 778
max_stars_count: 19,127
<gh_stars>1000+ /* * Copyright 2015-2021 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <stdio.h> #include <string.h> #include <openssl/crypto.h> #include <openssl/bio.h> #include <openssl/x509.h> #include <openssl/pem.h> #include <openssl/err.h> #include "testutil.h" static const char *root_f; static const char *roots_f; static const char *untrusted_f; static const char *bad_f; static const char *req_f; #define load_cert_from_file(file) load_cert_pem(file, NULL) /*- * Test for CVE-2015-1793 (Alternate Chains Certificate Forgery) * * Chain is as follows: * * rootCA (self-signed) * | * interCA * | * subinterCA subinterCA (self-signed) * | | * leaf ------------------ * | * bad * * rootCA, interCA, subinterCA, subinterCA (ss) all have CA=TRUE * leaf and bad have CA=FALSE * * subinterCA and subinterCA (ss) have the same subject name and keys * * interCA (but not rootCA) and subinterCA (ss) are in the trusted store * (roots.pem) * leaf and subinterCA are in the untrusted list (untrusted.pem) * bad is the certificate being verified (bad.pem) * * Versions vulnerable to CVE-2015-1793 will fail to detect that leaf has * CA=FALSE, and will therefore incorrectly verify bad * */ static int test_alt_chains_cert_forgery(void) { int ret = 0; int i; X509 *x = NULL; STACK_OF(X509) *untrusted = NULL; X509_STORE_CTX *sctx = NULL; X509_STORE *store = NULL; X509_LOOKUP *lookup = NULL; store = X509_STORE_new(); if (store == NULL) goto err; lookup = X509_STORE_add_lookup(store, X509_LOOKUP_file()); if (lookup == NULL) goto err; if (!X509_LOOKUP_load_file(lookup, roots_f, X509_FILETYPE_PEM)) goto err; untrusted = load_certs_pem(untrusted_f); if ((x = load_cert_from_file(bad_f)) == NULL) goto err; sctx = X509_STORE_CTX_new(); if (sctx == NULL) goto err; if (!X509_STORE_CTX_init(sctx, store, x, untrusted)) goto err; i = X509_verify_cert(sctx); if (i == 0 && X509_STORE_CTX_get_error(sctx) == X509_V_ERR_INVALID_CA) { /* This is the result we were expecting: Test passed */ ret = 1; } err: X509_STORE_CTX_free(sctx); X509_free(x); OSSL_STACK_OF_X509_free(untrusted); X509_STORE_free(store); return ret; } OPT_TEST_DECLARE_USAGE("roots.pem untrusted.pem bad.pem\n") static int test_distinguishing_id(void) { X509 *x = NULL; int ret = 0; ASN1_OCTET_STRING *v = NULL, *v2 = NULL; char *distid = "this is an ID"; x = load_cert_from_file(bad_f); if (x == NULL) goto err; v = ASN1_OCTET_STRING_new(); if (v == NULL) goto err; if (!ASN1_OCTET_STRING_set(v, (unsigned char *)distid, (int)strlen(distid))) { ASN1_OCTET_STRING_free(v); goto err; } X509_set0_distinguishing_id(x, v); v2 = X509_get0_distinguishing_id(x); if (!TEST_ptr(v2) || !TEST_int_eq(ASN1_OCTET_STRING_cmp(v, v2), 0)) goto err; ret = 1; err: X509_free(x); return ret; } static int test_req_distinguishing_id(void) { X509_REQ *x = NULL; BIO *bio = NULL; int ret = 0; ASN1_OCTET_STRING *v = NULL, *v2 = NULL; char *distid = "this is an ID"; bio = BIO_new_file(req_f, "r"); if (bio == NULL) goto err; x = PEM_read_bio_X509_REQ(bio, NULL, 0, NULL); if (x == NULL) goto err; v = ASN1_OCTET_STRING_new(); if (v == NULL) goto err; if (!ASN1_OCTET_STRING_set(v, (unsigned char *)distid, (int)strlen(distid))) { ASN1_OCTET_STRING_free(v); goto err; } X509_REQ_set0_distinguishing_id(x, v); v2 = X509_REQ_get0_distinguishing_id(x); if 
(!TEST_ptr(v2) || !TEST_int_eq(ASN1_OCTET_STRING_cmp(v, v2), 0)) goto err; ret = 1; err: X509_REQ_free(x); BIO_free(bio); return ret; } static int test_self_signed(const char *filename, int use_trusted, int expected) { X509 *cert = load_cert_from_file(filename); /* may result in NULL */ STACK_OF(X509) *trusted = sk_X509_new_null(); X509_STORE_CTX *ctx = X509_STORE_CTX_new(); int ret; ret = TEST_int_eq(X509_self_signed(cert, 1), expected); if (cert != NULL) { if (use_trusted) ret = ret && TEST_true(sk_X509_push(trusted, cert)); ret = ret && TEST_true(X509_STORE_CTX_init(ctx, NULL, cert, NULL)); X509_STORE_CTX_set0_trusted_stack(ctx, trusted); ret = ret && TEST_int_eq(X509_verify_cert(ctx), expected); } X509_STORE_CTX_free(ctx); sk_X509_free(trusted); X509_free(cert); return ret; } static int test_self_signed_good(void) { return test_self_signed(root_f, 1, 1); } static int test_self_signed_bad(void) { return test_self_signed(bad_f, 1, 0); } static int test_self_signed_error(void) { return test_self_signed("nonexistent file name", 1, -1); } static int test_store_ctx(void) { /* Verifying a cert where we have no trusted certs should fail */ return test_self_signed(bad_f, 0, 0); } int setup_tests(void) { if (!test_skip_common_options()) { TEST_error("Error parsing test options\n"); return 0; } if (!TEST_ptr(root_f = test_get_argument(0)) || !TEST_ptr(roots_f = test_get_argument(1)) || !TEST_ptr(untrusted_f = test_get_argument(2)) || !TEST_ptr(bad_f = test_get_argument(3)) || !TEST_ptr(req_f = test_get_argument(4))) return 0; ADD_TEST(test_alt_chains_cert_forgery); ADD_TEST(test_store_ctx); ADD_TEST(test_distinguishing_id); ADD_TEST(test_req_distinguishing_id); ADD_TEST(test_self_signed_good); ADD_TEST(test_self_signed_bad); ADD_TEST(test_self_signed_error); return 1; }
token_count: 2,865
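The self-signed tests in the OpenSSL sample above are built on X509_self_signed(). Outside the test harness (which supplies helpers such as load_cert_pem), a stand-alone sketch of that check could look like this; the certificate path is a placeholder and the call requires OpenSSL 3.0 or later.

#include <stdio.h>
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>

int main(void)
{
    BIO *bio = BIO_new_file("cert.pem", "r"); /* placeholder path */
    if (bio == NULL)
        return 1;
    X509 *cert = PEM_read_bio_X509(bio, NULL, NULL, NULL);
    BIO_free(bio);
    if (cert == NULL)
        return 1;
    /* Second argument 1 also verifies the self-signature; returns 1, 0 or a negative error. */
    int ret = X509_self_signed(cert, 1);
    printf("self-signed: %d\n", ret);
    X509_free(cert);
    return ret == 1 ? 0 : 1;
}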
max_stars_count: 521
#include <iostream>

#include <elle/printf.hh>
#include <elle/network/Interface.hh>

// FIXME: How is this a "test" ??
int main()
{
  auto interfaces = elle::network::Interface::get_map(
      elle::network::Interface::Filter::only_up |
      elle::network::Interface::Filter::no_loopback
  );
  for (auto const& pair: interfaces)
  {
    std::cout << "IP: " << pair.second.ipv4_address << std::endl
              << "MAC: " << pair.second.mac_address << std::endl;
  }
  std::cout << "tests done." << std::endl;
  return (0);
}
token_count: 216
max_stars_count: 3,301
<reponame>okjay/Alink package com.alibaba.alink.operator.common.io.types; import org.apache.flink.api.common.typeinfo.BasicTypeInfo; import org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo; import org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo; import org.apache.flink.util.Preconditions; import java.sql.Types; import java.util.Collections; import java.util.HashMap; import java.util.Map; /** * Type conversion utils against {@link java.sql.Types}. * * There're two special cases in which Types.LONGVARCHAR and Types.NULL are mapped to String. */ public class JdbcTypeConverter { /** * Mapping from {@link java.sql.Types} (in integer form) to Flink TypeInformation. */ private static final Map <Integer, TypeInformation <?>> MAP_INDEX_TO_FLINK_TYPE; /** * Mapping from Flink TypeInformation to {@link java.sql.Types} integers. */ private static final Map <TypeInformation <?>, Integer> MAP_FLINK_TYPE_TO_INDEX; static { HashMap <TypeInformation <?>, Integer> m1 = new HashMap <>(); m1.put(BasicTypeInfo.STRING_TYPE_INFO, Types.VARCHAR); m1.put(BasicTypeInfo.BOOLEAN_TYPE_INFO, Types.BOOLEAN); m1.put(BasicTypeInfo.BYTE_TYPE_INFO, Types.TINYINT); m1.put(BasicTypeInfo.SHORT_TYPE_INFO, Types.SMALLINT); m1.put(BasicTypeInfo.INT_TYPE_INFO, Types.INTEGER); m1.put(BasicTypeInfo.LONG_TYPE_INFO, Types.BIGINT); m1.put(BasicTypeInfo.FLOAT_TYPE_INFO, Types.FLOAT); m1.put(BasicTypeInfo.DOUBLE_TYPE_INFO, Types.DOUBLE); m1.put(SqlTimeTypeInfo.DATE, Types.DATE); m1.put(SqlTimeTypeInfo.TIME, Types.TIME); m1.put(SqlTimeTypeInfo.TIMESTAMP, Types.TIMESTAMP); m1.put(BasicTypeInfo.BIG_DEC_TYPE_INFO, Types.DECIMAL); m1.put(PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO, Types.BINARY); MAP_FLINK_TYPE_TO_INDEX = Collections.unmodifiableMap(m1); HashMap <Integer, TypeInformation <?>> m3 = new HashMap <>(); m3.put(Types.LONGVARCHAR, BasicTypeInfo.STRING_TYPE_INFO); m3.put(Types.VARCHAR, BasicTypeInfo.STRING_TYPE_INFO); m3.put(Types.NULL, BasicTypeInfo.STRING_TYPE_INFO); m3.put(Types.BOOLEAN, BasicTypeInfo.BOOLEAN_TYPE_INFO); m3.put(Types.TINYINT, BasicTypeInfo.BYTE_TYPE_INFO); m3.put(Types.SMALLINT, BasicTypeInfo.SHORT_TYPE_INFO); m3.put(Types.INTEGER, BasicTypeInfo.INT_TYPE_INFO); m3.put(Types.BIGINT, BasicTypeInfo.LONG_TYPE_INFO); m3.put(Types.FLOAT, BasicTypeInfo.FLOAT_TYPE_INFO); m3.put(Types.DOUBLE, BasicTypeInfo.DOUBLE_TYPE_INFO); m3.put(Types.DATE, SqlTimeTypeInfo.DATE); m3.put(Types.TIME, SqlTimeTypeInfo.TIME); m3.put(Types.TIMESTAMP, SqlTimeTypeInfo.TIMESTAMP); m3.put(Types.DECIMAL, BasicTypeInfo.BIG_DEC_TYPE_INFO); m3.put(Types.BINARY, PrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO); MAP_INDEX_TO_FLINK_TYPE = Collections.unmodifiableMap(m3); } /** * Get {@link java.sql.Types} (in integer form) from Flink TypeInformation. * * @param type flink TypeInformation. * @return Corresponding type integer in {@link java.sql.Types}. * @throws IllegalArgumentException when unsupported type encountered. */ public static int getIntegerSqlType(TypeInformation <?> type) { if (MAP_FLINK_TYPE_TO_INDEX.containsKey(type)) { return MAP_FLINK_TYPE_TO_INDEX.get(type); } else if (type instanceof ObjectArrayTypeInfo || type instanceof PrimitiveArrayTypeInfo) { return Types.ARRAY; } else { throw new IllegalArgumentException("Unsupported type: " + type); } } /** * Get {@link java.sql.Types} (in integer form) from Flink TypeInformation. * * @param typeIndex type integer in {@link java.sql.Types}. 
* @return flink TypeInformation. * @throws IllegalArgumentException when unsupported type encountered. */ public static TypeInformation <?> getFlinkType(int typeIndex) { TypeInformation <?> typeInformation = MAP_INDEX_TO_FLINK_TYPE.get(typeIndex); Preconditions.checkArgument(typeInformation != null, "Unsupported type: %s", typeIndex); return typeInformation; } }
token_count: 1,549
max_stars_count: 892
{ "schema_version": "1.2.0", "id": "GHSA-mg2c-c9jj-4xgv", "modified": "2022-04-09T00:00:41Z", "published": "2022-04-03T00:01:02Z", "aliases": [ "CVE-2021-32503" ], "details": "Unauthenticated users can access sensitive web URLs through GET request, which should be restricted to maintenance users only. A malicious attacker could use this sensitive information’s to launch further attacks on the system.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:N/I:N/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-32503" }, { "type": "WEB", "url": "https://sick.com/psirt" } ], "database_specific": { "cwe_ids": [ "CWE-400" ], "severity": "MODERATE", "github_reviewed": false } }
/**
 * package containing ngrinder task scheduling.
 */
package org.ngrinder.infra.schedule;
{"nom":"Vareilles","circ":"1ère circonscription","dpt":"Creuse","inscrits":225,"abs":115,"votants":110,"blancs":19,"nuls":9,"exp":82,"res":[{"nuance":"REM","nom":"<NAME>","voix":57},{"nuance":"LR","nom":"<NAME>","voix":25}]}
/* * Copyright (c) 2011-2018, <NAME>. All Rights Reserved. * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dianping.cat.system.page.config; public enum Action implements org.unidal.web.mvc.Action { PROJECT_ALL("projects"), PROJECT_ADD("projectAdd"), PROJECT_UPDATE_SUBMIT("updateSubmit"), PROJECT_DELETE("projectDelete"), TOPOLOGY_GRAPH_NODE_CONFIG_LIST("topologyGraphNodeConfigList"), TOPOLOGY_GRAPH_NODE_CONFIG_ADD_OR_UPDATE("topologyGraphNodeConfigAdd"), TOPOLOGY_GRAPH_NODE_CONFIG_ADD_OR_UPDATE_SUBMIT("topologyGraphNodeConfigAddSumbit"), TOPOLOGY_GRAPH_NODE_CONFIG_DELETE("topologyGraphNodeConfigDelete"), TOPOLOGY_GRAPH_EDGE_CONFIG_ADD_OR_UPDATE("topologyGraphEdgeConfigAdd"), TOPOLOGY_GRAPH_EDGE_CONFIG_ADD_OR_UPDATE_SUBMIT("topologyGraphEdgeConfigAddSumbit"), TOPOLOGY_GRAPH_EDGE_CONFIG_DELETE("topologyGraphEdgeConfigDelete"), TOPOLOGY_GRAPH_EDGE_CONFIG_LIST("topologyGraphEdgeConfigList"), TOPO_GRAPH_FORMAT_CONFIG_UPDATE("topoGraphFormatUpdate"), HEARTBEAT_RULE_CONFIG_LIST("heartbeatRuleConfigList"), HEARTBEAT_RULE_ADD_OR_UPDATE("heartbeatRuleUpdate"), HEARTBEAT_RULE_ADD_OR_UPDATE_SUBMIT("heartbeatRuleSubmit"), HEARTBEAT_RULE_DELETE("heartbeatRulDelete"), ALERT_DEFAULT_RECEIVERS("alertDefaultReceivers"), ALERT_POLICY("alertPolicy"), HEARTBEAT_DISPLAY_POLICY("displayPolicy"), EXCEPTION("exception"), EXCEPTION_THRESHOLD_UPDATE("exceptionThresholdUpdate"), EXCEPTION_THRESHOLD_ADD("exceptionThresholdAdd"), EXCEPTION_THRESHOLD_UPDATE_SUBMIT("exceptionThresholdUpdateSubmit"), EXCEPTION_THRESHOLD_DELETE("exceptionThresholdDelete"), EXCEPTION_EXCLUDE_ADD("exceptionExcludeAdd"), EXCEPTION_EXCLUDE_UPDATE_SUBMIT("exceptionExcludeUpdateSubmit"), EXCEPTION_EXCLUDE_DELETE("exceptionExcludeDelete"), TRANSACTION_RULE("transactionRule"), TRANSACTION_RULE_ADD_OR_UPDATE("transactionRuleUpdate"), TRANSACTION_RULE_ADD_OR_UPDATE_SUBMIT("transactionRuleSubmit"), TRANSACTION_RULE_DELETE("transactionRuleDelete"), EVENT_RULE("eventRule"), EVENT_RULE_ADD_OR_UPDATE("eventRuleUpdate"), EVENT_RULE_ADD_OR_UPDATE_SUBMIT("eventRuleSubmit"), EVENT_RULE_DELETE("eventRuleDelete"), STORAGE_RULE("storageRule"), STORAGE_RULE_ADD_OR_UPDATE("storageRuleUpdate"), STORAGE_RULE_ADD_OR_UPDATE_SUBMIT("storageRuleSubmit"), STORAGE_RULE_DELETE("storageRuleDelete"), STORAGE_GROUP_CONFIG_UPDATE("storageGroupConfigUpdate"), DOMAIN_GROUP_CONFIGS("domainGroupConfigs"), DOMAIN_GROUP_CONFIG_UPDATE("domainGroupConfigUpdate"), DOMAIN_GROUP_CONFIG_SUBMIT("domainGroupConfigSubmit"), DOMAIN_GROUP_CONFIG_DELETE("domainGroupConfigDelete"), ROUTER_CONFIG_UPDATE("routerConfigUpdate"), SAMPLE_CONFIG_UPDATE("sampleConfigUpdate"), ALERT_SENDER_CONFIG_UPDATE("alertSenderConfigUpdate"), SERVER_FILTER_CONFIG_UPDATE("serverFilterConfigUpdate"), SERVER_CONFIG_UPDATE("serverConfigUpdate"), 
REPORT_RELOAD_CONFIG_UPDATE("reportReloadConfigUpdate"), ALL_REPORT_CONFIG("allReportConfig"); private String m_name; private Action(String name) { m_name = name; } public static Action getByName(String name, Action defaultAction) { for (Action action : Action.values()) { if (action.getName().equals(name)) { return action; } } return defaultAction; } @Override public String getName() { return m_name; } }
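// ---------------------------------------------------------------------------
// Added example (not in the original CAT source): a hypothetical sketch showing
// how Action.getByName resolves an action name (e.g. a request parameter) and
// falls back to a caller-supplied default when the name is unknown.
// ---------------------------------------------------------------------------
import com.dianping.cat.system.page.config.Action;

class ActionLookupDemo {
	public static void main(String[] args) {
		// Known name: resolves to the matching enum constant.
		Action known = Action.getByName("transactionRule", Action.PROJECT_ALL);
		System.out.println(known == Action.TRANSACTION_RULE); // true

		// Unknown name: falls back to the supplied default action.
		Action unknown = Action.getByName("noSuchAction", Action.PROJECT_ALL);
		System.out.println(unknown == Action.PROJECT_ALL); // true
	}
}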
{ "name": "LBYNetworking", "version": "0.1.0", "summary": "LBYNetworking verry sample", "description": "this is to use demo.\nso I'm glad to share with you.", "homepage": "https://github.com/LucyBenYing/LBYNetworking", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "LucyBenYing": "<EMAIL>" }, "source": { "git": "https://github.com/LucyBenYing/LBYNetworking.git", "tag": "0.1.0" }, "platforms": { "ios": "8.0" }, "source_files": "LBYNetworking/Classes/**/*" }
/* * Copyright (c) 2011-2018, <NAME>. All Rights Reserved. * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * *    http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dianping.zebra.shard.merge; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.io.Reader; import java.io.StringReader; import java.math.BigDecimal; import java.math.BigInteger; import java.net.MalformedURLException; import java.net.URL; import java.sql.Array; import java.sql.Blob; import java.sql.Clob; import java.sql.Date; import java.sql.NClob; import java.sql.Ref; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.RowId; import java.sql.SQLException; import java.sql.SQLXML; import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; import java.util.ArrayList; import java.util.Calendar; import java.util.List; import java.util.Map; import com.dianping.zebra.shard.jdbc.unsupport.UnsupportedShardResultSet; /** * 数据池, 用于隐藏真实数据的来源。<br> * 真实数据来源包括两个可能: * <ol> * <li>由若干个<tt>ResultSet</tt>简单串联组成的复合<tt>ResultSet</tt> ,这种情况通常不需要Zebra进行数据处理(没有跨表跨库orderby并且没有跨库跨表聚合函数列存在)。</li> * <li>由若干个<tt>ResultSet</tt>中的所有数据经过全局排序以及数据合并(主要针对跨库跨表的全局聚合函数)而得到的 <tt>List</tt>组成。这种情况的数据池称作内存数据池</li> * </ol> * 在遍历数据的时候,<br> * 对于第一种情况,我们只要简单的按照顺序遍历每一个<tt>ResultSet</tt>并且调用具体的<tt>ResultSet</tt>方法即可。<br> * 对于第二种情况,我们需要遍历<tt>List</tt>,同时进行必要的数据类型转换。<br> * <p/> * 数据池支持limit子句的,通过设定对应的<tt>skip</tt>,<tt>max</tt>属性并调用<tt>procLimit</tt> 方法以调整数据池的初始状态。<br> * * @author <NAME> */ public class ShardResultSetAdaptor extends UnsupportedShardResultSet implements ResultSet { protected List<ResultSet> resultSets = new ArrayList<ResultSet>(); protected List<RowData> memoryData; protected boolean inMemory = false; protected int resultSetIndex = 0; protected int rowNum = 0; protected int skip = MergeContext.NO_OFFSET; protected int max = MergeContext.NO_LIMIT; private boolean wasNull = false; private ResultSetMetaData memoryResultSetMetaData; // for getMetaData bug when procLimit return 0 rows private int resultSetType = ResultSet.TYPE_FORWARD_ONLY; // for getMetaData bug when procLimit return 0 rows /** * 滚动数据池游标到下一条记录 * * @return * @throws java.sql.SQLException */ public boolean next() throws SQLException { rowNum++; if (!inMemory) { if (max != MergeContext.NO_LIMIT && rowNum > max) { return false; } if (resultSets.size() > 0) { if (resultSetIndex >= resultSets.size()) { return false; } if (!resultSets.get(resultSetIndex).next()) { while (++resultSetIndex < resultSets.size()) { if (resultSets.get(resultSetIndex).next()) { break; } } if (resultSetIndex >= resultSets.size()) { return false; } else { return true; } } else { return true; } } else { return false; } } else { return rowNum - 1 < memoryData.size(); } } /** * @return the skip */ public int getSkip() { return skip; } /** * @param skip * the skip to set */ public void 
setSkip(int skip) { this.skip = skip; } /** * @return the max */ public int getMax() { return max; } /** * @param max * the max to set */ public void setMax(int max) { this.max = max; } /** * 设定内存数据(<tt>List</tt>) * * @param memoryData * the memoryData to set */ public void setMemoryData(List<RowData> memoryData) { this.inMemory = true; this.memoryData = memoryData; } /** * 是否内存数据池 * * @return the inMemory */ public boolean isInMemory() { return inMemory; } public void setResultSets(List<ResultSet> resultSets) { this.resultSets = resultSets; } /** * <p> * 处理limit * </p> * * @throws java.sql.SQLException */ public void procLimit() throws SQLException { if (inMemory) { int fromIndex = skip == MergeContext.NO_OFFSET ? 0 : skip; if (fromIndex >= memoryData.size()) { if (this.memoryData.size() > 0) { this.memoryResultSetMetaData = memoryData.get(0).getResultSetMetaData(); this.resultSetType = memoryData.get(0).getResultSetType(); } this.memoryData = new ArrayList<RowData>(); return; } int toIndex = max == MergeContext.NO_LIMIT ? memoryData.size() : fromIndex + max; toIndex = toIndex > memoryData.size() ? memoryData.size() : toIndex; List<RowData> subDataList = memoryData.subList(fromIndex, toIndex); this.memoryData = new ArrayList<RowData>(subDataList); } else { if (skip > 0) { int rowSkipped = 0; for (int i = 0; i < resultSets.size(); i++) { resultSetIndex = i; while (resultSets.get(i).next()) { if (++rowSkipped >= skip) { break; } } if (rowSkipped >= skip) { break; } } } } } /** * 清理数据池中所有数据,并重置所有状态位 */ public void clear() { this.resultSets.clear(); if (inMemory && this.memoryData != null) { this.memoryData.clear(); } this.inMemory = false; this.resultSetIndex = 0; this.rowNum = 0; } /** * 获得当前数据偏移辆(以1开始) * * @return */ public int getCurrentRowNo() { return rowNum; } public int findColumn(String columnName) throws SQLException { if (inMemory) { return memoryData.get(rowNum - 1).getIndexByName(columnName); } else { return resultSets.get(resultSetIndex).findColumn(columnName); } } public Array getArray(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (Array) memoryData.get(rowNum - 1).get(columnIndex).getValue(); } } else { return resultSets.get(resultSetIndex).getArray(columnIndex); } } public Array getArray(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return (Array) memoryData.get(rowNum - 1).get(columnName).getValue(); } } else { return resultSets.get(resultSetIndex).getArray(columnName); } } public InputStream getAsciiStream(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value != null) { return new ByteArrayInputStream((byte[]) value); } return null; } } else { return resultSets.get(resultSetIndex).getAsciiStream(columnIndex); } } public InputStream getAsciiStream(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value != null) { return new ByteArrayInputStream((byte[]) value); } return null; } } else { return resultSets.get(resultSetIndex).getAsciiStream(columnName); } } public BigDecimal 
getBigDecimal(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof BigDecimal) { return (BigDecimal) value; } else { return new BigDecimal(value.toString()); } } } else { return resultSets.get(resultSetIndex).getBigDecimal(columnIndex); } } public BigDecimal getBigDecimal(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof BigDecimal) { return (BigDecimal) value; } else { return new BigDecimal(value.toString()); } } } else { return resultSets.get(resultSetIndex).getBigDecimal(columnName); } } @SuppressWarnings("deprecation") public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof BigDecimal) { return ((BigDecimal) value).setScale(scale); } else { return new BigDecimal(value.toString()).setScale(scale); } } } else { return resultSets.get(resultSetIndex).getBigDecimal(columnIndex, scale); } } @SuppressWarnings("deprecation") public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof BigDecimal) { return ((BigDecimal) value).setScale(scale); } else { return new BigDecimal(value.toString()).setScale(scale); } } } else { return resultSets.get(resultSetIndex).getBigDecimal(columnName, scale); } } public InputStream getBinaryStream(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value != null) { return new ByteArrayInputStream((byte[]) value); } return null; } } else { return resultSets.get(resultSetIndex).getBinaryStream(columnIndex); } } public InputStream getBinaryStream(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value != null) { return new ByteArrayInputStream((byte[]) value); } return null; } } else { return resultSets.get(resultSetIndex).getBinaryStream(columnName); } } public Blob getBlob(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); return new com.dianping.zebra.shard.resultset.Blob((byte[]) value); } } else { return resultSets.get(resultSetIndex).getBlob(columnIndex); } } public Blob getBlob(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); return new com.dianping.zebra.shard.resultset.Blob((byte[]) value); 
} } else { return resultSets.get(resultSetIndex).getBlob(columnName); } } public boolean getBoolean(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return false; } else { Object result = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (result instanceof Boolean) { return (Boolean) result; } else if (result instanceof String) { String stringVal = String.valueOf(result); int c = Character.toLowerCase(stringVal.charAt(0)); return ((c == 't') || (c == '1') || stringVal.equals("-1")); } else { Long longVal = Long.parseLong(String.valueOf(result)); return (longVal > 0 || longVal == -1); } } } else { return resultSets.get(resultSetIndex).getBoolean(columnIndex); } } public boolean getBoolean(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return false; } else { Object result = memoryData.get(rowNum - 1).get(columnName).getValue(); if (result instanceof Boolean) { return (Boolean) result; } else if (result instanceof String) { String stringVal = String.valueOf(result); int c = Character.toLowerCase(stringVal.charAt(0)); return ((c == 't') || (c == '1') || stringVal.equals("-1")); } else { Long longVal = Long.parseLong(String.valueOf(result)); return (longVal > 0 || longVal == -1); } } } else { return resultSets.get(resultSetIndex).getBoolean(columnName); } } public byte getByte(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return 0; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); return Byte.parseByte(value.toString()); } } else { return resultSets.get(resultSetIndex).getByte(columnIndex); } } public byte getByte(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return 0; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); return Byte.parseByte(value.toString()); } } else { return resultSets.get(resultSetIndex).getByte(columnName); } } public byte[] getBytes(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (byte[]) memoryData.get(rowNum - 1).get(columnIndex).getValue(); } } else { return resultSets.get(resultSetIndex).getBytes(columnIndex); } } public byte[] getBytes(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return (byte[]) memoryData.get(rowNum - 1).get(columnName).getValue(); } } else { return resultSets.get(resultSetIndex).getBytes(columnName); } } public Reader getCharacterStream(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); return new StringReader(value.toString()); } } else { return resultSets.get(resultSetIndex).getCharacterStream(columnIndex); } } public Reader getCharacterStream(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); return new StringReader(value.toString()); } } else { 
return resultSets.get(resultSetIndex).getCharacterStream(columnName); } } public Clob getClob(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); return new com.dianping.zebra.shard.resultset.Clob(value.toString()); } } else { return resultSets.get(resultSetIndex).getClob(columnIndex); } } public Clob getClob(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); return new com.dianping.zebra.shard.resultset.Clob(value.toString()); } } else { return resultSets.get(resultSetIndex).getClob(columnName); } } public int getConcurrency() throws SQLException { if (inMemory) { return memoryData.get(rowNum - 1).getConcurrency(); } else { return resultSets.get(resultSetIndex).getConcurrency(); } } public String getCursorName() throws SQLException { if (inMemory) { return memoryData.get(rowNum - 1).getCursorName(); } else { return resultSets.get(resultSetIndex).getCursorName(); } } public Date getDate(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof String) { return Date.valueOf(value.toString()); } else { return (Date) value; } } } else { return resultSets.get(resultSetIndex).getDate(columnIndex); } } public Date getDate(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof String) { return Date.valueOf(value.toString()); } else { return (Date) value; } } } else { return resultSets.get(resultSetIndex).getDate(columnName); } } public Date getDate(int columnIndex, Calendar cal) throws SQLException { if (inMemory) { throw new UnsupportedOperationException( "Zebra unsupport getDate with Calendar in a multi actual datasource query."); } else { return resultSets.get(columnIndex).getDate(columnIndex, cal); } } public Date getDate(String columnName, Calendar cal) throws SQLException { if (inMemory) { throw new UnsupportedOperationException( "Zebra unsupport getDate with Calendar in a multi actual datasource query."); } else { return resultSets.get(resultSetIndex).getDate(columnName, cal); } } public double getDouble(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (val instanceof Double) { return (Double) val; } else { return Double.parseDouble(val.toString()); } } } else { return resultSets.get(resultSetIndex).getDouble(columnIndex); } } public double getDouble(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnName).getValue(); if (val instanceof Double) { return (Double) val; } else { return Double.parseDouble(val.toString()); } } } else { return resultSets.get(resultSetIndex).getDouble(columnName); } } public float getFloat(int 
columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (val instanceof Float) { return (Float) val; } else { return Float.parseFloat(val.toString()); } } } else { return resultSets.get(resultSetIndex).getFloat(columnIndex); } } public float getFloat(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnName).getValue(); if (val instanceof Float) { return (Float) val; } else { return Float.parseFloat(val.toString()); } } } else { return resultSets.get(resultSetIndex).getFloat(columnName); } } public int getHoldability() throws SQLException { if (inMemory) { return memoryData.get(rowNum - 1).getHoldability(); } else { return resultSets.get(resultSetIndex).getHoldability(); } } public int getInt(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (val instanceof Integer) { return (Integer) val; } else if (val instanceof Boolean) { return (Boolean) val ? 1 : 0; } else { return Integer.parseInt(val.toString()); } } } else { return resultSets.get(resultSetIndex).getInt(columnIndex); } } public int getInt(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnName).getValue(); if (val instanceof Integer) { return (Integer) val; } else if (val instanceof Boolean) { return (Boolean) val ? 1 : 0; } else { return Integer.parseInt(val.toString()); } } } else { return resultSets.get(resultSetIndex).getInt(columnName); } } public long getLong(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (val instanceof Long) { return (Long) val; } else if (val instanceof BigInteger) { return ((BigInteger) val).longValue(); } else if (val instanceof Boolean) { return (Boolean) val ? 1L : 0L; } else { return Long.parseLong(val.toString()); } } } else { return resultSets.get(resultSetIndex).getLong(columnIndex); } } public long getLong(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return 0; } else { Object val = memoryData.get(rowNum - 1).get(columnName).getValue(); if (val instanceof Long) { return (Long) val; } else if (val instanceof BigInteger) { return ((BigInteger) val).longValue(); } else if (val instanceof Boolean) { return (Boolean) val ? 1L : 0L; } else { return Long.parseLong(val.toString()); } } } else { return resultSets.get(resultSetIndex).getLong(columnName); } } public ResultSetMetaData getMetaData() throws SQLException { if (inMemory) { if (memoryData.size() > 0) { return memoryData.get(rowNum == 0 ? 
0 : rowNum - 1).getResultSetMetaData(); } else { if (this.memoryResultSetMetaData != null) { return this.memoryResultSetMetaData; } else { return resultSets.get(0).getMetaData(); } } } else { return resultSets.get(resultSetIndex).getMetaData(); } } public Reader getNCharacterStream(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (Reader) memoryData.get(rowNum - 1).get(columnIndex).getValue(); } } else { return resultSets.get(resultSetIndex).getNCharacterStream(columnIndex); } } public Reader getNCharacterStream(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return (Reader) memoryData.get(rowNum - 1).get(columnName).getValue(); } } else { return resultSets.get(resultSetIndex).getNCharacterStream(columnName); } } public NClob getNClob(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (NClob) memoryData.get(rowNum - 1).get(columnIndex).getValue(); } } else { return resultSets.get(resultSetIndex).getNClob(columnIndex); } } public NClob getNClob(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return (NClob) memoryData.get(rowNum - 1).get(columnName).getValue(); } } else { return resultSets.get(resultSetIndex).getNClob(columnName); } } public String getNString(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return String.valueOf(memoryData.get(rowNum - 1).get(columnIndex).getValue()); } } else { return resultSets.get(resultSetIndex).getNString(columnIndex); } } public String getNString(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return String.valueOf(memoryData.get(rowNum - 1).get(columnName).getValue()); } } else { return resultSets.get(resultSetIndex).getNString(columnName); } } public Object getObject(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); return memoryData.get(rowNum - 1).get(columnIndex).getValue(); } else { return resultSets.get(resultSetIndex).getObject(columnIndex); } } public Object getObject(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); return memoryData.get(rowNum - 1).get(columnName).getValue(); } else { return resultSets.get(resultSetIndex).getObject(columnName); } } public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException { // Mysql Connector-j doesn't use the parameter map at all.... if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); return memoryData.get(rowNum - 1).get(columnIndex).getValue(); } else { return resultSets.get(resultSetIndex).getObject(columnIndex); } } public Object getObject(String columnName, Map<String, Class<?>> map) throws SQLException { // Mysql Connector-j doesn't use the parameter map at all.... 
if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); return memoryData.get(rowNum - 1).get(columnName).getValue(); } else { return resultSets.get(resultSetIndex).getObject(columnName); } } public Ref getRef(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (Ref) memoryData.get(rowNum - 1).get(columnIndex).getValue(); } } else { return resultSets.get(resultSetIndex).getRef(columnIndex); } } public Ref getRef(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); return (Ref) memoryData.get(rowNum - 1).get(columnName).getValue(); } else { return resultSets.get(resultSetIndex).getRef(columnName); } } public RowId getRowId(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (RowId) memoryData.get(rowNum - 1).get(columnIndex).getRowId(); } } else { return resultSets.get(resultSetIndex).getRowId(columnIndex); } } public RowId getRowId(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return (RowId) memoryData.get(rowNum - 1).get(columnName).getRowId(); } } else { return resultSets.get(resultSetIndex).getRowId(columnName); } } public SQLXML getSQLXML(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { return (SQLXML) memoryData.get(rowNum - 1).get(columnIndex).getValue(); } } else { return resultSets.get(resultSetIndex).getSQLXML(columnIndex); } } public SQLXML getSQLXML(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { return (SQLXML) memoryData.get(rowNum - 1).get(columnName).getValue(); } } else { return resultSets.get(resultSetIndex).getSQLXML(columnName); } } public short getShort(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return 0; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof Short) { return (Short) value; } else { return Short.parseShort(value.toString()); } } } else { return resultSets.get(resultSetIndex).getShort(columnIndex); } } public short getShort(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return 0; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof Short) { return (Short) value; } else { return Short.parseShort(value.toString()); } } } else { return resultSets.get(resultSetIndex).getShort(columnName); } } public String getString(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof String) { return (String) value; } else if (value instanceof byte[]) { return new String((byte[]) value); } else { return String.valueOf(value); } } } else { return resultSets.get(resultSetIndex).getString(columnIndex); } } public String getString(String columnName) throws 
SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof String) { return (String) value; } else if (value instanceof byte[]) { return new String((byte[]) value); } else { return String.valueOf(value); } } } else { return resultSets.get(resultSetIndex).getString(columnName); } } public Time getTime(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof Time) { return (Time) value; } else { return Time.valueOf(value.toString()); } } } else { return resultSets.get(resultSetIndex).getTime(columnIndex); } } public Time getTime(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof Time) { return (Time) value; } else { return Time.valueOf(value.toString()); } } } else { return resultSets.get(resultSetIndex).getTime(columnName); } } public Time getTime(int columnIndex, Calendar cal) throws SQLException { if (inMemory) { throw new UnsupportedOperationException( "Zebra unsupport getTime with Calendar in a multi actual datasource query."); } else { return resultSets.get(resultSetIndex).getTime(columnIndex, cal); } } public Time getTime(String columnName, Calendar cal) throws SQLException { if (inMemory) { throw new UnsupportedOperationException( "Zebra unsupport getTime with Calendar in a multi actual datasource query."); } else { return resultSets.get(resultSetIndex).getTime(columnName, cal); } } public Timestamp getTimestamp(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value instanceof Timestamp) { return (Timestamp) value; } else if (value instanceof Date) { return new Timestamp(((Date) value).getTime()); } else if(value instanceof Time) { return new Timestamp(((Time) value).getTime()); } else { return Timestamp.valueOf(value.toString()); } } } else { return resultSets.get(resultSetIndex).getTimestamp(columnIndex); } } public Timestamp getTimestamp(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value instanceof Timestamp) { return (Timestamp) value; } else if (value instanceof Date) { return new Timestamp(((Date) value).getTime()); } else if(value instanceof Time) { return new Timestamp(((Time) value).getTime()); } else { return Timestamp.valueOf(value.toString()); } } } else { return resultSets.get(resultSetIndex).getTimestamp(columnName); } } public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { if (inMemory) { throw new UnsupportedOperationException( "Zebra unsupport getTimestamp with Calendar in a multi actual datasource query."); } else { return resultSets.get(resultSetIndex).getTimestamp(columnIndex, cal); } } public Timestamp getTimestamp(String columnName, Calendar cal) throws SQLException { if (inMemory) { throw new 
UnsupportedOperationException( "Zebra unsupport getTimestamp with Calendar in a multi actual datasource query."); } else { return resultSets.get(resultSetIndex).getTimestamp(columnName, cal); } } public int getType() throws SQLException { if (inMemory) { if (rowNum >= 1) { return memoryData.get(rowNum - 1).getResultSetType(); } else if (memoryData != null && memoryData.size() > 0) { return memoryData.get(0).getResultSetType(); } else { if (resultSets.size() > 0) { return resultSets.get(resultSetIndex).getType(); } return resultSetType; } } else { return resultSets.get(resultSetIndex).getType(); } } public URL getURL(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); try { return new URL(value.toString()); } catch (MalformedURLException mfe) { throw new SQLException("ResultSet.Malformed_URL '" + value + "'"); } } } else { return resultSets.get(resultSetIndex).getURL(columnIndex); } } public URL getURL(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); try { return new URL(value.toString()); } catch (MalformedURLException mfe) { throw new SQLException("ResultSet.Malformed_URL '" + value + "'"); } } } else { return resultSets.get(resultSetIndex).getURL(columnName); } } @SuppressWarnings("deprecation") public InputStream getUnicodeStream(int columnIndex) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnIndex).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnIndex).getValue(); if (value != null) { return new ByteArrayInputStream((byte[]) value); } return null; } } else { return resultSets.get(resultSetIndex).getUnicodeStream(columnIndex); } } @SuppressWarnings("deprecation") public InputStream getUnicodeStream(String columnName) throws SQLException { if (inMemory) { wasNull = memoryData.get(rowNum - 1).get(columnName).isWasNull(); if (wasNull) { return null; } else { Object value = memoryData.get(rowNum - 1).get(columnName).getValue(); if (value != null) { return new ByteArrayInputStream((byte[]) value); } return null; } } else { return resultSets.get(resultSetIndex).getUnicodeStream(columnName); } } public boolean wasNull() throws SQLException { if (inMemory) { return wasNull; } else { return resultSets.get(resultSetIndex).wasNull(); } } @Override public void close() throws SQLException { } @Override public int getRow() throws SQLException { return 0; } @Override public void setFetchDirection(int direction) throws SQLException { } @Override public int getFetchDirection() throws SQLException { return 0; } @Override public void setFetchSize(int rows) throws SQLException { } @Override public int getFetchSize() throws SQLException { return 0; } @Override public Statement getStatement() throws SQLException { return null; } @Override public boolean isClosed() throws SQLException { return false; } }
<reponame>Masud2017/sandboxed-api<filename>oss-internship-2020/guetzli/guetzli_entry_points.cc<gh_stars>1000+ // Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "guetzli_entry_points.h" // NOLINT(build/include) #include <sys/stat.h> #include <algorithm> #include <cstdio> #include <iostream> #include <string> #include <vector> #include "guetzli/jpeg_data_reader.h" #include "guetzli/quality.h" #include "png.h" // NOLINT(build/include) #include "absl/status/statusor.h" #include "sandboxed_api/util/fileops.h" namespace { constexpr int kBytesPerPixel = 350; constexpr int kLowestMemusageMB = 100; struct GuetzliInitData { std::string in_data; guetzli::Params params; guetzli::ProcessStats stats; }; struct ImageData { int xsize; int ysize; std::vector<uint8_t> rgb; }; sapi::LenValStruct CreateLenValFromData(const void* data, size_t size) { void* new_data = malloc(size); memcpy(new_data, data, size); return {size, new_data}; } absl::StatusOr<std::string> ReadFromFd(int fd) { struct stat file_data; int status = fstat(fd, &file_data); if (status < 0) { return absl::FailedPreconditionError("Error reading input from fd"); } std::string result; result.resize(file_data.st_size); status = read(fd, result.data(), result.size()); if (status < 0) { return absl::FailedPreconditionError("Error reading input from fd"); } return result; } absl::StatusOr<GuetzliInitData> PrepareDataForProcessing( const ProcessingParams& processing_params) { absl::StatusOr<std::string> input = ReadFromFd(processing_params.remote_fd); if (!input.ok()) { return input.status(); } guetzli::Params guetzli_params; guetzli_params.butteraugli_target = static_cast<float>( guetzli::ButteraugliScoreForQuality(processing_params.quality)); guetzli::ProcessStats stats; if (processing_params.verbose) { stats.debug_output_file = stderr; } return GuetzliInitData{std::move(input.value()), guetzli_params, stats}; } inline uint8_t BlendOnBlack(const uint8_t val, const uint8_t alpha) { return (static_cast<int>(val) * static_cast<int>(alpha) + 128) / 255; } // Modified version of ReadPNG from original guetzli.cc absl::StatusOr<ImageData> ReadPNG(const std::string& data) { std::vector<uint8_t> rgb; int xsize, ysize; png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); if (!png_ptr) { return absl::FailedPreconditionError( "Error reading PNG data from input file"); } png_infop info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { png_destroy_read_struct(&png_ptr, nullptr, nullptr); return absl::FailedPreconditionError( "Error reading PNG data from input file"); } if (setjmp(png_jmpbuf(png_ptr)) != 0) { // Ok we are here because of the setjmp. 
png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); return absl::FailedPreconditionError( "Error reading PNG data from input file"); } std::istringstream memstream(data, std::ios::in | std::ios::binary); png_set_read_fn( png_ptr, static_cast<void*>(&memstream), [](png_structp png_ptr, png_bytep outBytes, png_size_t byteCountToRead) { std::istringstream& memstream = *static_cast<std::istringstream*>(png_get_io_ptr(png_ptr)); memstream.read(reinterpret_cast<char*>(outBytes), byteCountToRead); if (memstream.eof()) png_error(png_ptr, "unexpected end of data"); if (memstream.fail()) png_error(png_ptr, "read from memory error"); }); // The png_transforms flags are as follows: // packing == convert 1,2,4 bit images, // strip == 16 -> 8 bits / channel, // shift == use sBIT dynamics, and // expand == palettes -> rgb, grayscale -> 8 bit images, tRNS -> alpha. const unsigned int png_transforms = PNG_TRANSFORM_PACKING | PNG_TRANSFORM_EXPAND | PNG_TRANSFORM_STRIP_16; png_read_png(png_ptr, info_ptr, png_transforms, nullptr); png_bytep* row_pointers = png_get_rows(png_ptr, info_ptr); xsize = png_get_image_width(png_ptr, info_ptr); ysize = png_get_image_height(png_ptr, info_ptr); rgb.resize(3 * xsize * ysize); const int components = png_get_channels(png_ptr, info_ptr); switch (components) { case 1: { // GRAYSCALE for (int y = 0; y < ysize; ++y) { const uint8_t* row_in = row_pointers[y]; uint8_t* row_out = &rgb[3 * y * xsize]; for (int x = 0; x < xsize; ++x) { const uint8_t gray = row_in[x]; row_out[3 * x + 0] = gray; row_out[3 * x + 1] = gray; row_out[3 * x + 2] = gray; } } break; } case 2: { // GRAYSCALE + ALPHA for (int y = 0; y < ysize; ++y) { const uint8_t* row_in = row_pointers[y]; uint8_t* row_out = &rgb[3 * y * xsize]; for (int x = 0; x < xsize; ++x) { const uint8_t gray = BlendOnBlack(row_in[2 * x], row_in[2 * x + 1]); row_out[3 * x + 0] = gray; row_out[3 * x + 1] = gray; row_out[3 * x + 2] = gray; } } break; } case 3: { // RGB for (int y = 0; y < ysize; ++y) { const uint8_t* row_in = row_pointers[y]; uint8_t* row_out = &rgb[3 * y * xsize]; memcpy(row_out, row_in, 3 * xsize); } break; } case 4: { // RGBA for (int y = 0; y < ysize; ++y) { const uint8_t* row_in = row_pointers[y]; uint8_t* row_out = &rgb[3 * y * xsize]; for (int x = 0; x < xsize; ++x) { const uint8_t alpha = row_in[4 * x + 3]; row_out[3 * x + 0] = BlendOnBlack(row_in[4 * x + 0], alpha); row_out[3 * x + 1] = BlendOnBlack(row_in[4 * x + 1], alpha); row_out[3 * x + 2] = BlendOnBlack(row_in[4 * x + 2], alpha); } } break; } default: png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); return absl::FailedPreconditionError( "Error reading PNG data from input file"); } png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); return ImageData{xsize, ysize, std::move(rgb)}; } bool CheckMemoryLimitExceeded(int memlimit_mb, int xsize, int ysize) { double pixels = static_cast<double>(xsize) * ysize; return memlimit_mb != -1 && (pixels * kBytesPerPixel / (1 << 20) > memlimit_mb || memlimit_mb < kLowestMemusageMB); } } // namespace extern "C" bool ProcessJpeg(const ProcessingParams* processing_params, sapi::LenValStruct* output) { auto processing_data = PrepareDataForProcessing(*processing_params); if (!processing_data.ok()) { std::cerr << processing_data.status().ToString() << std::endl; return false; } guetzli::JPEGData jpg_header; if (!guetzli::ReadJpeg(processing_data->in_data, guetzli::JPEG_READ_HEADER, &jpg_header)) { std::cerr << "Error reading JPG data from input file" << std::endl; return false; } if 
(CheckMemoryLimitExceeded(processing_params->memlimit_mb, jpg_header.width, jpg_header.height)) { std::cerr << "Memory limit would be exceeded" << std::endl; return false; } std::string out_data; if (!guetzli::Process(processing_data->params, &processing_data->stats, processing_data->in_data, &out_data)) { std::cerr << "Guezli processing failed" << std::endl; return false; } *output = CreateLenValFromData(out_data.data(), out_data.size()); return true; } extern "C" bool ProcessRgb(const ProcessingParams* processing_params, sapi::LenValStruct* output) { auto processing_data = PrepareDataForProcessing(*processing_params); if (!processing_data.ok()) { std::cerr << processing_data.status().ToString() << std::endl; return false; } auto png_data = ReadPNG(processing_data->in_data); if (!png_data.ok()) { std::cerr << "Error reading PNG data from input file" << std::endl; return false; } if (CheckMemoryLimitExceeded(processing_params->memlimit_mb, png_data->xsize, png_data->ysize)) { std::cerr << "Memory limit would be exceeded" << std::endl; return false; } std::string out_data; if (!guetzli::Process(processing_data->params, &processing_data->stats, png_data->rgb, png_data->xsize, png_data->ysize, &out_data)) { std::cerr << "Guetzli processing failed" << std::endl; return false; } *output = CreateLenValFromData(out_data.data(), out_data.size()); return true; } extern "C" bool WriteDataToFd(int fd, sapi::LenValStruct* data) { return sandbox2::file_util::fileops::WriteToFD( fd, static_cast<const char*>(data->data), data->size); }
<gh_stars>1000+ /* Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <unordered_set> #include "graphlearn/core/graph/graph_store.h" #include "graphlearn/core/operator/sampler/sampler.h" #include "graphlearn/core/operator/op_factory.h" #include "graphlearn/include/sampling_request.h" #include "graphlearn/include/graph_request.h" #include "graphlearn/include/index_option.h" #include "graphlearn/platform/env.h" #include "gtest/gtest.h" #include "graphlearn/include/config.h" using namespace graphlearn; // NOLINT [build/namespaces] using namespace graphlearn::op; // NOLINT [build/namespaces] class SamplerTest : public ::testing::Test { protected: void SetUp() override { ::graphlearn::io::SideInfo info_edge; info_edge.format = ::graphlearn::io::kWeighted; info_edge.type = "u-i"; info_edge.src_type = "user"; info_edge.dst_type = "item"; std::unique_ptr<UpdateEdgesRequest> req_edge(new UpdateEdgesRequest(&info_edge, 5)); std::unique_ptr<UpdateEdgesResponse> res_edge(new UpdateEdgesResponse); ::graphlearn::io::EdgeValue value_edge; for (int32_t i = 0; i < 5; ++i) { GenEdgeValue(&value_edge, i); req_edge->Append(&value_edge); } ::graphlearn::io::SideInfo info_node; info_node.format = ::graphlearn::io::kWeighted; info_node.type = "user"; std::unique_ptr<UpdateNodesRequest> req_node(new UpdateNodesRequest(&info_node, 5)); std::unique_ptr<UpdateNodesResponse> res_node(new UpdateNodesResponse); ::graphlearn::io::NodeValue value_node; for (int32_t i = 0; i < 5; ++i) { GenNodeValue(&value_node, i); req_node->Append(&value_node); } graph_store_ = new GraphStore(Env::Default()); Graph* graph = graph_store_->GetGraph("u-i"); Noder* noder = graph_store_->GetNoder("user"); graph->UpdateEdges(req_edge.get(), res_edge.get()); noder->UpdateNodes(req_node.get(), res_node.get()); IndexOption option; option.name = "sort"; graph->Build(option); noder->Build(option); } void TearDown() override { delete graph_store_; } void GenEdgeValue(::graphlearn::io::EdgeValue* value, int32_t index) { ::graphlearn::io::IdType src_ids[5] = {0, 0, 0, 1, 1}; ::graphlearn::io::IdType dst_ids[5] = {10, 20, 30, 11, 21}; float weights[5] = {0.8, 1.0, 0.5, 0.88, 1.2}; value->src_id = src_ids[index]; value->dst_id = dst_ids[index]; value->weight = weights[index]; } void GenNodeValue(::graphlearn::io::NodeValue* value, int32_t index) { ::graphlearn::io::IdType node_ids[5] = {0, 1, 2, 3, 4}; float weights[5] = {0.8, 1.0, 0.5, 0.88, 1.2}; value->id = node_ids[index]; value->weight = weights[index]; } protected: GraphStore* graph_store_; }; TEST_F(SamplerTest, Random) { int32_t nbr_count = 2; SamplingRequest* req = new SamplingRequest("u-i", "RandomSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); // 1 has neighbors {11, 21}, 2 has no neighbors int32_t batch_size = 2; int64_t ids[2] = {1, 2}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = 
OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->NeighborCount(), nbr_count); EXPECT_EQ(res->IsSparse(), false); std::unordered_set<int64_t> nbr_set({11, 21}); const int64_t* neighbor_ids = res->GetNeighborIds(); // check neighbors of 1 for (int32_t i = 0; i < nbr_count; ++i) { EXPECT_TRUE(nbr_set.find(neighbor_ids[i]) != nbr_set.end()); } // check neighbors of 2, fill with default id for (int32_t i = nbr_count; i < batch_size * nbr_count; ++i) { EXPECT_TRUE(neighbor_ids[i] == 0); } delete res; delete req; } TEST_F(SamplerTest, RandomWithoutReplacement) { int32_t nbr_count = 3; SamplingRequest* req = new SamplingRequest( "u-i", "RandomWithoutReplacementSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); // 1 has neighbors {11, 21}, 2 has no neighbors int32_t batch_size = 2; int64_t ids[2] = {1, 2}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->NeighborCount(), nbr_count); EXPECT_EQ(res->IsSparse(), false); std::unordered_set<int64_t> nbr_set({11, 21}); const int64_t* neighbor_ids = res->GetNeighborIds(); // check neighbors of 1 for (int32_t i = 0; i < 2; ++i) { EXPECT_TRUE(nbr_set.find(neighbor_ids[i]) != nbr_set.end()); nbr_set.erase(nbr_set.find(neighbor_ids[i])); } EXPECT_TRUE(neighbor_ids[2] == GLOBAL_FLAG(DefaultNeighborId)); // check neighbors of 2, fill with default id for (int32_t i = nbr_count; i < batch_size * nbr_count; ++i) { EXPECT_TRUE(neighbor_ids[i] == 0); } delete res; delete req; } TEST_F(SamplerTest, Topk) { int32_t nbr_count = 2; SamplingRequest* req = new SamplingRequest("u-i", "TopkSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); // 0 has neighbors {10, 20, 30}, 1 has neighbors {11, 21} int32_t batch_size = 2; int64_t ids[2] = {0, 1}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->NeighborCount(), nbr_count); EXPECT_EQ(res->IsSparse(), false); // expected results will be ordered by edge_weight int64_t result[4] = {20, 10, 21, 11}; const int64_t* neighbor_ids = res->GetNeighborIds(); for (int32_t i = 0; i < batch_size * nbr_count; ++i) { EXPECT_EQ(neighbor_ids[i], result[i]); } delete res; delete req; } TEST_F(SamplerTest, EdgeWeight) { int32_t nbr_count = 2; SamplingRequest* req = new SamplingRequest("u-i", "EdgeWeightSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); int32_t batch_size = 2; int64_t ids[2] = {0, 1}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->NeighborCount(), nbr_count); EXPECT_EQ(res->IsSparse(), false); const int64_t* neighbor_ids = res->GetNeighborIds(); // 0 has neighbors {10, 20, 30} std::unordered_set<int64_t> nbr_set_0({10, 20, 30}); for (int32_t i = 0; i < nbr_count; ++i) { EXPECT_TRUE(nbr_set_0.find(neighbor_ids[i]) != nbr_set_0.end()); } // 1 has neighbors {11, 21} 
std::unordered_set<int64_t> nbr_set_1({11, 21}); for (int32_t i = nbr_count; i < batch_size * nbr_count; ++i) { EXPECT_TRUE(nbr_set_1.find(neighbor_ids[i]) != nbr_set_1.end()); } delete res; delete req; } TEST_F(SamplerTest, InDegree) { int32_t nbr_count = 2; SamplingRequest* req = new SamplingRequest("u-i", "InDegreeSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); int32_t batch_size = 2; int64_t ids[2] = {0, 1}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->NeighborCount(), nbr_count); EXPECT_EQ(res->IsSparse(), false); const int64_t* neighbor_ids = res->GetNeighborIds(); // 0 has neighbors {10, 20, 30} std::unordered_set<int64_t> nbr_set_0({10, 20, 30}); for (int32_t i = 0; i < nbr_count; ++i) { EXPECT_TRUE(nbr_set_0.find(neighbor_ids[i]) != nbr_set_0.end()); } // 1 has neighbors {11, 21} std::unordered_set<int64_t> nbr_set_1({11, 21}); for (int32_t i = nbr_count; i < batch_size * nbr_count; ++i) { EXPECT_TRUE(nbr_set_1.find(neighbor_ids[i]) != nbr_set_1.end()); } delete res; delete req; } TEST_F(SamplerTest, Full) { int32_t nbr_count = 2; SamplingRequest* req = new SamplingRequest("u-i", "FullSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); int32_t batch_size = 2; int64_t ids[2] = {0, 1}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->IsSparse(), true); const int32_t* degrees = res->GetDegrees(); EXPECT_EQ(degrees[0], 2); EXPECT_EQ(degrees[1], 2); const int64_t* neighbor_ids = res->GetNeighborIds(); // 0 has neighbors {10, 20, 30} std::unordered_set<int64_t> nbr_set_0({10, 20, 30}); for (int32_t i = 0; i < 2; ++i) { EXPECT_TRUE(nbr_set_0.find(neighbor_ids[i]) != nbr_set_0.end()); } // 1 has neighbors {11, 21} std::unordered_set<int64_t> nbr_set_1({11, 21}); for (int32_t i = 2; i < 4; ++i) { EXPECT_TRUE(nbr_set_1.find(neighbor_ids[i]) != nbr_set_1.end()); } delete res; delete req; } TEST_F(SamplerTest, DISABLED_NodeWeightNegative) { int32_t nbr_count = 2; SamplingRequest* req = new SamplingRequest("user", "NodeWeightNegativeSampler", nbr_count); SamplingResponse* res = new SamplingResponse(); int32_t batch_size = 2; int64_t ids[2] = {0, 1}; req->Set(ids, batch_size); OpFactory::GetInstance()->Set(graph_store_); Operator* op = OpFactory::GetInstance()->Create(req->Name()); EXPECT_TRUE(op != nullptr); Status s = op->Process(req, res); EXPECT_TRUE(s.ok()); EXPECT_EQ(res->BatchSize(), batch_size); EXPECT_EQ(res->NeighborCount(), nbr_count); EXPECT_EQ(res->IsSparse(), false); const int64_t* neighbor_ids = res->GetNeighborIds(); // res should not be in neg_set std::unordered_set<int64_t> neg_set({0, 1}); // res should be in pos_set std::unordered_set<int64_t> pos_set({2, 3, 4}); for (int32_t i = 0; i < batch_size * nbr_count; ++i) { EXPECT_TRUE(neg_set.find(neighbor_ids[i]) == neg_set.end()); EXPECT_TRUE(pos_set.find(neighbor_ids[i]) != pos_set.end()); } delete res; delete req; }
4,418
971
<filename>dl-worker/dl-worker-core/src/main/java/com/ucar/datalink/worker/core/runtime/TaskStatusEvent.java package com.ucar.datalink.worker.core.runtime; /** * Created by lubiao on 2018/4/24. */ public class TaskStatusEvent { private String taskId; private String taskExecutionId; private Long startTime; public TaskStatusEvent(String taskId, String taskExecutionId, Long startTime) { this.taskId = taskId; this.taskExecutionId = taskExecutionId; this.startTime = startTime; } public String getTaskId() { return taskId; } public void setTaskId(String taskId) { this.taskId = taskId; } public String getTaskExecutionId() { return taskExecutionId; } public void setTaskExecutionId(String taskExecutionId) { this.taskExecutionId = taskExecutionId; } public Long getStartTime() { return startTime; } public void setStartTime(Long startTime) { this.startTime = startTime; } }
399
1,056
<filename>php/php.project/src/org/netbeans/modules/php/project/util/UsageLogging.java /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.php.project.util; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.logging.Logger; import org.netbeans.modules.php.api.util.StringUtils; import org.netbeans.modules.php.project.PhpProject; import org.netbeans.modules.php.project.PhpProjectType; import org.netbeans.modules.php.spi.testing.PhpTestingProvider; import org.netbeans.modules.web.clientproject.api.jstesting.JsTestingProvider; import org.netbeans.modules.web.common.api.UsageLogger; public final class UsageLogging { private static final Logger LOGGER = Logger.getLogger(UsageLogging.class.getName()); private final UsageLogger testConfigUsageLogger = new UsageLogger.Builder(PhpProjectUtils.USAGE_LOGGER_NAME) .message(PhpProjectUtils.class, "USG_TEST_CONFIG_PHP") // NOI18N .firstMessageOnly(false) .create(); private final UsageLogger phpTestRunUsageLogger = new UsageLogger.Builder(PhpProjectUtils.USAGE_LOGGER_NAME) .message(PhpProjectUtils.class, "USG_TEST_RUN_PHP") // NOI18N .create(); private final UsageLogger jsTestRunUsageLogger = UsageLogger.jsTestRunUsageLogger(PhpProjectUtils.USAGE_LOGGER_NAME); //~ Helper methods public static void logTestConfig(PhpProject project, List<String> testingProviders) { assert project != null; project.getLookup().lookup(UsageLogging.class).logPhpTestConfig(testingProviders); } public static void logPhpTestRun(PhpProject project, List<PhpTestingProvider> testingProviders) { assert project != null; project.getLookup().lookup(UsageLogging.class).logPhpTestRun(testingProviders); } public static void logJsTestRun(PhpProject project, JsTestingProvider jsTestingProvider) { assert project != null; project.getLookup().lookup(UsageLogging.class).logJsTestRun(jsTestingProvider); } //~ Logging methods private void logPhpTestConfig(List<String> testingProviders) { assert testingProviders != null; LOGGER.finest("Usage logging for PHP test config"); testConfigUsageLogger.log(StringUtils.implode(testingProviders, "|")); // NOI18N } private void logPhpTestRun(List<PhpTestingProvider> testingProviders) { assert testingProviders != null; LOGGER.finest("Usage logging for PHP test run"); phpTestRunUsageLogger.log(getTestingProvidersForUsage(testingProviders)); } private void logJsTestRun(JsTestingProvider jsTestingProvider) { assert jsTestingProvider != null; jsTestRunUsageLogger.log(PhpProjectType.TYPE, jsTestingProvider.getIdentifier()); } private static String getTestingProvidersForUsage(Collection<PhpTestingProvider> testingProviders) { assert testingProviders != null; List<String> identifiers = new ArrayList<>(testingProviders.size()); for (PhpTestingProvider provider : 
testingProviders) { identifiers.add(provider.getIdentifier()); } return StringUtils.implode(identifiers, "|"); // NOI18N } }
1,348
571
package camelinaction; import org.apache.camel.Exchange; import org.apache.camel.component.mock.MockEndpoint; import org.apache.camel.test.spring.CamelSpringTestSupport; import org.junit.Test; import org.springframework.context.support.AbstractXmlApplicationContext; import org.springframework.context.support.ClassPathXmlApplicationContext; import org.w3c.dom.Document; /** * Test to demonstrate using @XPath with @Namespace. */ public class XmlOrderNamespaceTest extends CamelSpringTestSupport { @Override protected AbstractXmlApplicationContext createApplicationContext() { return new ClassPathXmlApplicationContext("camelinaction/xmlOrderNamespace.xml"); } @Override public void setUp() throws Exception { deleteDirectory("target/order"); super.setUp(); } @Test public void sendIncomingOrderWithNamespace() throws Exception { MockEndpoint mock = getMockEndpoint("mock:queue:order"); mock.expectedMessageCount(1); // prepare an XML document from a String which is converted to a DOM // notice we have included the namespace in the XML String body = "<order xmlns=\"http://camelinaction.com/order\" customerId=\"4444\"><item>Camel in action</item></order>"; Document xml = context.getTypeConverter().convertTo(Document.class, body); // store the order as a file which is picked up by the route template.sendBodyAndHeader("file://target/order", xml, Exchange.FILE_NAME, "order.xml"); mock.assertIsSatisfied(); } }
513
347
package org.ovirt.engine.ui.webadmin.section.main.presenter.tab.cluster; import javax.inject.Inject; import org.ovirt.engine.core.common.businessentities.Cluster; import org.ovirt.engine.core.common.businessentities.VDS; import org.ovirt.engine.ui.common.presenter.DetailActionPanelPresenterWidget; import org.ovirt.engine.ui.common.uicommon.model.SearchableDetailModelProvider; import org.ovirt.engine.ui.uicommonweb.UICommand; import org.ovirt.engine.ui.uicommonweb.models.clusters.ClusterHostListModel; import org.ovirt.engine.ui.uicommonweb.models.clusters.ClusterListModel; import org.ovirt.engine.ui.webadmin.ApplicationConstants; import org.ovirt.engine.ui.webadmin.gin.AssetProvider; import org.ovirt.engine.ui.webadmin.widget.action.WebAdminButtonDefinition; import com.google.web.bindery.event.shared.EventBus; public class ClusterHostActionPanelPresenterWidget extends DetailActionPanelPresenterWidget<Cluster, VDS, ClusterListModel<Void>, ClusterHostListModel> { private static final ApplicationConstants constants = AssetProvider.getConstants(); @Inject public ClusterHostActionPanelPresenterWidget(EventBus eventBus, DetailActionPanelPresenterWidget.ViewDef<Cluster, VDS> view, SearchableDetailModelProvider<VDS, ClusterListModel<Void>, ClusterHostListModel> dataProvider) { super(eventBus, view, dataProvider); } @Override protected void initializeButtons() { addActionButton(new WebAdminButtonDefinition<Cluster, VDS>(constants.updateMomPolicyClusterHost()) { @Override protected UICommand resolveCommand() { return getDetailModel().getUpdateMomPolicyCommand(); } }); } }
609
552
package com.mkyong.core.repository; import org.springframework.stereotype.Repository; @Repository public class HelloRepositoryImpl implements HelloRepository { @Override public String get() { return "Hello JUnit 5"; } }
83
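The record above is a one-method Spring @Repository whose get() returns a fixed greeting, evidently written as a JUnit 5 demo. As a hedged illustration only (the test class name and the presence of JUnit Jupiter on the classpath are assumptions, not part of the record), a minimal companion test could look like this:

package com.mkyong.core.repository;

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

// Hypothetical companion test; not taken from the original repository.
class HelloRepositoryImplTest {

    private final HelloRepository repository = new HelloRepositoryImpl();

    @Test
    void getReturnsGreeting() {
        // HelloRepositoryImpl.get() is hard-coded to return "Hello JUnit 5"
        assertEquals("Hello JUnit 5", repository.get());
    }
}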
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.masterfs.filebasedfs; import java.io.File; import java.io.IOException; import java.util.logging.Level; import java.util.logging.Logger; import org.netbeans.junit.MockServices; import org.netbeans.junit.NbTestCase; import org.netbeans.junit.RandomlyFails; import org.netbeans.modules.masterfs.filebasedfs.fileobjects.BaseFileObj; import org.openide.filesystems.FileLock; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileUtil; import org.openide.util.test.TestFileUtils; import org.netbeans.modules.masterfs.filebasedfs.FileUtilTest.EventType; import org.netbeans.modules.masterfs.filebasedfs.FileUtilTest.TestFileChangeListener; import org.netbeans.modules.masterfs.filebasedfs.fileobjects.FileObjectFactory; import org.netbeans.modules.masterfs.filebasedfs.fileobjects.TestUtils; import org.netbeans.modules.masterfs.providers.ProvidedExtensionsTest; /** * @author <NAME> */ public class FileUtilAddRecursiveListenerTest extends NbTestCase { static { MockServices.setServices(ProvidedExtensionsTest.AnnotationProviderImpl.class); } private final Logger LOG; public FileUtilAddRecursiveListenerTest(String name) { super(name); LOG = Logger.getLogger("TEST." + name); } @Override protected void setUp() throws Exception { System.getProperties().put("org.netbeans.modules.masterfs.watcher.disable", "true"); clearWorkDir(); } @Override protected Level logLevel() { return Level.FINER; } /** Tests FileObject.addRecursiveListener on folder as declared in * {@link FileObject#addRecursiveListener(org.openide.filesystems.FileChangeListener) }. * It is expected that all events from sub folders are delivered just once. */ @RandomlyFails // NB-Core-Build #3874: Wrong number of events when file deleted. expected:<5> but was:<4> public void testAddRecursiveListenerToFileObjectFolder() throws Exception { checkFolderRecursiveListener(false); } /** Tests FileUtil.addRecursiveListener on folder as declared in * {@link FileUtil#addRecursiveListener(org.openide.filesystems.FileChangeListener, java.io.File) }. * It is expected that all events from sub folders are delivered just once. */ @RandomlyFails // NB-Core-Build #4077: Wrong number of events when file was modified. expected:<3> but was:<1> public void testAddRecursiveListenerToFileFolder() throws Exception { checkFolderRecursiveListener(true); } /** Tests addRecursiveListener on folder either added to FileObject or File. 
* @param isOnFile true to add listener to java.io.File, false to FileObject */ private void checkFolderRecursiveListener(boolean isOnFile) throws Exception { clearWorkDir(); // test files: dir/file1, dir/subdir/subfile, dir/subdir/subsubdir/subsubfile final File rootF = getWorkDir(); final File dirF = new File(rootF, "dir"); File fileF = new File(dirF, "file1"); File subdirF = new File(dirF, "subdir"); File subfileF = new File(subdirF, "subfile"); File subsubdirF = new File(subdirF, "subsubdir"); File subsubfileF = new File(subsubdirF, "subsubfile"); TestFileChangeListener fcl = new TestFileChangeListener(); FileObject dirFO; if (isOnFile) { FileUtil.addRecursiveListener(fcl, dirF); dirFO = FileUtil.createFolder(dirF); assertEquals("Wrong number of events fired when folder created.", 1, fcl.check(EventType.FOLDER_CREATED)); } else { dirFO = FileUtil.createFolder(dirF); dirFO.addRecursiveListener(fcl); } // TestUtils.gcAll(); // create dir FileObject subdirFO = dirFO.createFolder("subdir"); assertEquals("Wrong number of events fired when sub folder created.", 1, fcl.check(EventType.FOLDER_CREATED)); FileObject subsubdirFO = subdirFO.createFolder("subsubdir"); assertEquals("Wrong number of events when sub sub folder created.", 1, fcl.check(EventType.FOLDER_CREATED)); // create file FileObject file1FO = dirFO.createData("file1"); assertEquals("Wrong number of events when data created.", 1, fcl.check(EventType.DATA_CREATED)); FileObject subfileFO = subdirFO.createData("subfile"); assertEquals("Wrong number of events when data in sub folder created.", 1, fcl.check(EventType.DATA_CREATED)); FileObject subsubfileFO = subsubdirFO.createData("subsubfile"); assertEquals("Wrong number of events when data in sub sub folder created.", 1, fcl.check(EventType.DATA_CREATED)); // modify file1FO.getOutputStream().close(); assertEquals("Wrong number of events when file folder modified.", 1, fcl.check(EventType.CHANGED)); subfileFO.getOutputStream().close(); assertEquals("Wrong number of events when file in sub folder modified.", 1, fcl.check(EventType.CHANGED)); subsubfileFO.getOutputStream().close(); assertEquals("Wrong number of events when file in sub sub folder modified.", 1, fcl.check(EventType.CHANGED)); // delete file1FO.delete(); assertEquals("Wrong number of events when child file deleted.", 1, fcl.check(EventType.DELETED)); subsubfileFO.delete(); assertEquals("Wrong number of events when child file in sub sub folder deleted.", 1, fcl.check(EventType.DELETED)); subsubdirFO.delete(); assertEquals("Wrong number of events when sub sub folder deleted.", 1, fcl.check(EventType.DELETED)); subfileFO.delete(); assertEquals("Wrong number of events when child file in sub folder deleted.", 1, fcl.check(EventType.DELETED)); subdirFO.delete(); assertEquals("Wrong number of events when sub folder deleted.", 1, fcl.check(EventType.DELETED)); // atomic action FileUtil.runAtomicAction(new Runnable() { public @Override void run() { try { FileObject rootFO = FileUtil.toFileObject(rootF); rootFO.createFolder("fakedir"); // no events rootFO.setAttribute("fake", "fake"); // no events rootFO.createData("fakefile"); // no events FileObject dirFO = FileUtil.toFileObject(dirF); dirFO.createData("file1"); FileObject subdirFO = dirFO.createFolder("subdir"); subdirFO.createData("subfile"); FileObject subsubdirFO = subdirFO.createFolder("subsubdir"); subsubdirFO.createData("subsubfile"); } catch (IOException ex) { throw new RuntimeException(ex); } } }); // TODO - should be 3 assertEquals("Wrong number of events fired when file 
was created in atomic action.", 1, fcl.check(EventType.DATA_CREATED)); // TODO - should be 2 assertEquals("Wrong number of events fired when file was created in atomic action.", 1, fcl.check(EventType.FOLDER_CREATED)); assertEquals("No other events should be fired.", 0, fcl.checkAll()); // rename file1FO = dirFO.getFileObject("file1"); subdirFO = dirFO.getFileObject("subdir"); subfileFO = subdirFO.getFileObject("subfile"); subsubdirFO = subdirFO.getFileObject("subsubdir"); subsubfileFO = subsubdirFO.getFileObject("subsubfile"); fcl.clearAll(); FileLock lock = file1FO.lock(); file1FO.rename(lock, "file1Renamed", null); lock.releaseLock(); assertEquals("Wrong number of events when child file renamed.", 1, fcl.check(EventType.RENAMED)); lock = subfileFO.lock(); subfileFO.rename(lock, "subfileRenamed", null); lock.releaseLock(); assertEquals("Wrong number of events when child file in sub folder renamed.", 1, fcl.check(EventType.RENAMED)); lock = subsubfileFO.lock(); subsubfileFO.rename(lock, "subsubfileRenamed", null); lock.releaseLock(); assertEquals("Wrong number of events when child file in sub sub folder renamed.", 1, fcl.check(EventType.RENAMED)); lock = subsubdirFO.lock(); subsubdirFO.rename(lock, "subsubdirRenamed", null); lock.releaseLock(); assertEquals("Wrong number of events when sub sub folder renamed.", 1, fcl.check(EventType.RENAMED)); lock = subdirFO.lock(); subdirFO.rename(lock, "subdirRenamed", null); lock.releaseLock(); assertEquals("Wrong number of events when sub folder renamed.", 1, fcl.check(EventType.RENAMED)); lock = dirFO.lock(); dirFO.rename(lock, "dirRenamed", null); lock.releaseLock(); assertEquals("Wrong number of events when sub folder renamed.", 1, fcl.check(EventType.RENAMED)); lock = dirFO.lock(); dirFO.rename(lock, "dir", null); lock.releaseLock(); /* According to jskrivanek in http://www.netbeans.org/nonav/issues/showattachment.cgi/86910/X.diff, the rename back does not need to * fire an event. 
Instead the support delivers FOLDER_CREATED event: assertEquals("Wrong number of events when sub folder renamed.", 1, fcl.check(EventType.RENAMED)); assertEquals("Wrong number of events when sub folder renamed.", 1, fcl.check(EventType.FOLDER_CREATED)); fcl.printAll(); assertEquals("No other events should be fired.", 0, fcl.checkAll()); */ // cleanup after rename dirFO.getFileObject("file1Renamed").delete(); dirFO.getFileObject("subdirRenamed").delete(); fcl.clearAll(); // disk changes LOG.log(Level.INFO, "Going to sleep {0}", System.currentTimeMillis()); Thread.sleep(1000); // give OS same time LOG.log(Level.INFO, "Waking up {0}", System.currentTimeMillis()); assertTrue(subsubdirF.mkdirs()); assertTrue(fileF.createNewFile()); assertTrue(subfileF.createNewFile()); assertTrue(subsubfileF.createNewFile()); TestFileUtils.touch(subsubfileF, null); TestFileUtils.touch(subfileF, null); TestFileUtils.touch(fileF, null); LOG.log(Level.INFO, "After refresh {0} to {1}", new Object[]{subsubfileF, subsubfileF.lastModified()}); LOG.log(Level.INFO, "After refresh {0} to {1}", new Object[]{subfileF, subfileF.lastModified()}); LOG.log(Level.INFO, "After refresh {0} to {1}", new Object[]{fileF, fileF.lastModified()}); FileUtil.refreshAll(); fcl.printAll(LOG); // TODO - should be 3 assertEquals("Wrong number of events when file was created.", 1, fcl.check(EventType.DATA_CREATED)); // TODO - should be 2 assertEquals("Wrong number of events when folder created.", 1, fcl.check(EventType.FOLDER_CREATED)); assertEquals("No other events should be fired.", 0, fcl.checkAll()); // TestUtils.gcAll(); TestFileUtils.touch(subsubfileF, null); TestFileUtils.touch(subfileF, null); TestFileUtils.touch(fileF, null); LOG.log(Level.INFO, "Touched {0} to {1}", new Object[]{subsubfileF, subsubfileF.lastModified()}); LOG.log(Level.INFO, "Touched {0} to {1}", new Object[]{subfileF, subfileF.lastModified()}); LOG.log(Level.INFO, "Touched {0} to {1}", new Object[]{fileF, fileF.lastModified()}); FileUtil.refreshAll(); fcl.printAll(LOG); final int expect = fcl.check(EventType.CHANGED); if (expect != 3) { TestUtils.logAll(); } assertEquals("Wrong number of events when file was modified.", 3, expect); fcl.clearAll(); assertTrue(subsubfileF.delete()); assertTrue(subsubdirF.delete()); assertTrue(subfileF.delete()); assertTrue(subdirF.delete()); assertTrue(fileF.delete()); FileUtil.refreshAll(); assertEquals("Wrong number of events when file deleted.", 5, fcl.check(EventType.DELETED)); // delete folder itself dirFO.delete(); assertEquals("Wrong number of events when folder deleted.", 1, fcl.check(EventType.DELETED)); LOG.info("OK"); } public void testProvideExtensionsRefreshRecursively() throws Exception { File root = new File(getWorkDir(), "root"); final File sub = new File(root, "sub"); final File subsub = new File(sub, "subsub"); File subsubdir = new File(subsub, "dir"); subsubdir.mkdirs(); File subfile = new File(sub, "file"); subfile.createNewFile(); File deepfile = new File(subsubdir, "deep"); deepfile.createNewFile(); ProvidedExtensionsTest.ProvidedExtensionsImpl.nextRefreshCall( sub, Long.MAX_VALUE - 10, subfile ); TestFileChangeListener fcl = new TestFileChangeListener(); FileObject rf = FileUtil.toFileObject(root); rf.addRecursiveListener(fcl); BaseFileObj noFO = FileObjectFactory.getInstance(root).getCachedOnly(subsubdir); assertNull("subsub directory has been skipped", noFO); assertEquals("No events", 0, fcl.checkAll()); LOG.log(Level.INFO, "Touching subfile: {0}", deepfile); TestFileUtils.touch(deepfile, null); 
LOG.log(Level.INFO, "Will do refresh, lastModified: {0}", deepfile.lastModified()); FileUtil.refreshFor(root); LOG.info("Refresh done"); fcl.check(EventType.ATTRIBUTE_CHANGED); // ignore if any fcl.printAll(LOG); assertEquals("No other events", 0, fcl.checkAll()); } }
5,720
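The NetBeans test above exercises FileUtil.addRecursiveListener across nested folders. For orientation, here is a minimal sketch of the same API outside the test harness; the class name is hypothetical, and it assumes the org.openide.filesystems module is on the classpath exactly as in the test.

import java.io.File;

import org.openide.filesystems.FileChangeAdapter;
import org.openide.filesystems.FileEvent;
import org.openide.filesystems.FileUtil;

// Sketch: listen for file creation anywhere under a directory tree,
// mirroring what the test's counting listener observes.
public class WatchTreeSketch {

    public static void watch(File root) {
        FileUtil.addRecursiveListener(new FileChangeAdapter() {
            @Override
            public void fileDataCreated(FileEvent fe) {
                // fired once per data file created below 'root'
                System.out.println("created: " + fe.getFile().getPath());
            }
        }, root);
    }
}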
5,964
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file has been auto-generated by code_generator_v8.py. DO NOT MODIFY! #ifndef V8HTMLMarqueeElement_h #define V8HTMLMarqueeElement_h #include "bindings/core/v8/ScriptWrappable.h" #include "bindings/core/v8/ToV8.h" #include "bindings/core/v8/V8Binding.h" #include "bindings/core/v8/V8DOMWrapper.h" #include "bindings/core/v8/V8HTMLElement.h" #include "bindings/core/v8/WrapperTypeInfo.h" #include "core/CoreExport.h" #include "core/html/HTMLMarqueeElement.h" #include "platform/heap/Handle.h" namespace blink { class V8HTMLMarqueeElement { public: class PrivateScript { public: static bool startMethod(LocalFrame* frame, HTMLMarqueeElement* holderImpl); static bool stopMethod(LocalFrame* frame, HTMLMarqueeElement* holderImpl); static bool createdCallbackMethod(LocalFrame* frame, HTMLMarqueeElement* holderImpl); static bool attachedCallbackMethod(LocalFrame* frame, HTMLMarqueeElement* holderImpl); static bool detachedCallbackMethod(LocalFrame* frame, HTMLMarqueeElement* holderImpl); static bool attributeChangedCallbackMethod(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String name, String oldValue, String newValue); static bool behaviorAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String* result); static bool behaviorAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String cppValue); static bool bgColorAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String* result); static bool bgColorAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String cppValue); static bool directionAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String* result); static bool directionAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String cppValue); static bool heightAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String* result); static bool heightAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String cppValue); static bool hspaceAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, unsigned* result); static bool hspaceAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, unsigned cppValue); static bool loopAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, int* result); static bool loopAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, int cppValue); static bool scrollAmountAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, int* result); static bool scrollAmountAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, int cppValue); static bool scrollDelayAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, int* result); static bool scrollDelayAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, int cppValue); static bool trueSpeedAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, bool* result); static bool trueSpeedAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, bool cppValue); static bool vspaceAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, unsigned* result); static bool vspaceAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, unsigned cppValue); static bool widthAttributeGetter(LocalFrame* frame, HTMLMarqueeElement* holderImpl, String* result); static bool widthAttributeSetter(LocalFrame* frame, HTMLMarqueeElement* 
holderImpl, String cppValue); }; CORE_EXPORT static bool hasInstance(v8::Local<v8::Value>, v8::Isolate*); static v8::Local<v8::Object> findInstanceInPrototypeChain(v8::Local<v8::Value>, v8::Isolate*); CORE_EXPORT static v8::Local<v8::FunctionTemplate> domTemplate(v8::Isolate*); static HTMLMarqueeElement* toImpl(v8::Local<v8::Object> object) { return toScriptWrappable(object)->toImpl<HTMLMarqueeElement>(); } CORE_EXPORT static HTMLMarqueeElement* toImplWithTypeCheck(v8::Isolate*, v8::Local<v8::Value>); CORE_EXPORT static const WrapperTypeInfo wrapperTypeInfo; static void refObject(ScriptWrappable*); static void derefObject(ScriptWrappable*); template<typename VisitorDispatcher> static void trace(VisitorDispatcher visitor, ScriptWrappable* scriptWrappable) { #if ENABLE(OILPAN) visitor->trace(scriptWrappable->toImpl<HTMLMarqueeElement>()); #endif } static const int internalFieldCount = v8DefaultWrapperInternalFieldCount + 0; static void installConditionallyEnabledProperties(v8::Local<v8::Object>, v8::Isolate*) { } static void preparePrototypeObject(v8::Isolate*, v8::Local<v8::Object> prototypeObject, v8::Local<v8::FunctionTemplate> interfaceTemplate) { } }; template <> struct V8TypeOf<HTMLMarqueeElement> { typedef V8HTMLMarqueeElement Type; }; } // namespace blink #endif // V8HTMLMarqueeElement_h
1,811
805
<filename>src/fgmmbin/fgmm-global-est.cc<gh_stars>100-1000 // fgmmbin/fgmm-global-est.cc // Copyright 2009-2011 Saarland University; Microsoft Corporation // See ../../COPYING for clarification regarding multiple authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. #include "base/kaldi-common.h" #include "util/common-utils.h" #include "gmm/full-gmm.h" #include "gmm/mle-full-gmm.h" int main(int argc, char *argv[]) { try { using namespace kaldi; typedef int32 int32; MleFullGmmOptions gmm_opts; const char *usage = "Estimate a full-covariance GMM from the accumulated stats.\n" "Usage: fgmm-global-est [options] <model-in> <stats-in> <model-out>\n"; bool binary_write = true; int32 mixup = 0; BaseFloat perturb_factor = 0.01; std::string update_flags_str = "mvw"; ParseOptions po(usage); po.Register("binary", &binary_write, "Write output in binary mode"); po.Register("update-flags", &update_flags_str, "Which GMM parameters will be " "updated: subset of mvw."); po.Register("mix-up", &mixup, "Increase number of mixture components to " "this overall target."); po.Register("perturb-factor", &perturb_factor, "While mixing up, perturb " "means by standard deviation times this factor."); gmm_opts.Register(&po); po.Read(argc, argv); if (po.NumArgs() != 3) { po.PrintUsage(); exit(1); } std::string model_in_filename = po.GetArg(1), stats_filename = po.GetArg(2), model_out_filename = po.GetArg(3); FullGmm fgmm; { bool binary_read; Input ki(model_in_filename, &binary_read); fgmm.Read(ki.Stream(), binary_read); } AccumFullGmm gmm_accs; { bool binary; Input ki(stats_filename, &binary); gmm_accs.Read(ki.Stream(), binary, true /* add accs, doesn't matter */); } { // Update GMMs. BaseFloat objf_impr, count; MleFullGmmUpdate(gmm_opts, gmm_accs, StringToGmmFlags(update_flags_str), &fgmm, &objf_impr, &count); KALDI_LOG << "Overall objective function improvement is " << (objf_impr/count) << " per frame over " << (count) << " frames."; } if (mixup != 0) fgmm.Split(mixup, perturb_factor); WriteKaldiObject(fgmm, model_out_filename, binary_write); KALDI_LOG << "Written model to " << model_out_filename; } catch(const std::exception &e) { std::cerr << e.what() << '\n'; return -1; } }
1,260
788
package org.apache.usergrid.persistence.queue.util; import java.util.*; import com.amazonaws.auth.policy.*; import com.amazonaws.auth.policy.conditions.ArnCondition; import com.amazonaws.services.sqs.model.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.usergrid.persistence.queue.LegacyQueueFig; import com.amazonaws.auth.policy.actions.SQSActions; import com.amazonaws.auth.policy.conditions.ConditionFactory; import com.amazonaws.services.sns.AmazonSNSClient; import com.amazonaws.services.sns.model.CreateTopicResult; import com.amazonaws.services.sns.model.ListTopicsResult; import com.amazonaws.services.sns.model.Topic; import com.amazonaws.services.sqs.AmazonSQSClient; /** * Created by <NAME> on 5/25/15. */ public class AmazonNotificationUtils { private static final Logger logger = LoggerFactory.getLogger( AmazonNotificationUtils.class ); public static String createQueue( final AmazonSQSClient sqs, final String queueName, final LegacyQueueFig fig ) throws Exception { final String deadletterQueueName = String.format( "%s_dead", queueName ); final Map<String, String> deadLetterAttributes = new HashMap<>( 2 ); deadLetterAttributes.put( "MessageRetentionPeriod", fig.getDeadletterRetentionPeriod() ); CreateQueueRequest createDeadLetterQueueRequest = new CreateQueueRequest().withQueueName( deadletterQueueName ).withAttributes( deadLetterAttributes ); final CreateQueueResult deadletterResult = sqs.createQueue( createDeadLetterQueueRequest ); logger.info( "Created deadletter queue with url {}", deadletterResult.getQueueUrl() ); final String deadletterArn = AmazonNotificationUtils.getQueueArnByName( sqs, deadletterQueueName ); String redrivePolicy = String .format( "{\"maxReceiveCount\":\"%s\"," + " \"deadLetterTargetArn\":\"%s\"}", fig.getQueueDeliveryLimit(), deadletterArn ); final String visibilityTimeoutInSeconds = String.valueOf(Math.max(1, fig.getVisibilityTimeout() / 1000)); final Map<String, String> queueAttributes = new HashMap<>( 2 ); queueAttributes.put( "MessageRetentionPeriod", fig.getRetentionPeriod() ); queueAttributes.put( "RedrivePolicy", redrivePolicy ); queueAttributes.put( "VisibilityTimeout", visibilityTimeoutInSeconds ); CreateQueueRequest createQueueRequest = new CreateQueueRequest(). withQueueName( queueName ) .withAttributes( queueAttributes ); CreateQueueResult result = sqs.createQueue( createQueueRequest ); String url = result.getQueueUrl(); logger.info( "Created SQS queue with url {}", url ); return url; } public static void setQueuePermissionsToReceive( final AmazonSQSClient sqs, final String queueUrl, final List<String> topicARNs ) throws Exception { // retrieve queue ARN and policy List<String> sqsAttrNames = Arrays.asList(QueueAttributeName.QueueArn.toString(), QueueAttributeName.Policy.toString()); GetQueueAttributesRequest getQueueAttributesRequest = new GetQueueAttributesRequest( queueUrl ).withAttributeNames( sqsAttrNames ); GetQueueAttributesResult queueAttributesResult = sqs.getQueueAttributes( getQueueAttributesRequest ); Map<String, String> sqsAttributeMap = queueAttributesResult.getAttributes(); String queueARN = sqsAttributeMap.get(QueueAttributeName.QueueArn.toString()); String policyJson = sqsAttributeMap.get(QueueAttributeName.Policy.toString()); // cannot send ARN in settings update, so remove it sqsAttributeMap.remove(QueueAttributeName.QueueArn.toString()); // get existing policy from JSON Policy policy = policyJson != null && policyJson.length() > 0 ? 
Policy.fromJson(policyJson) : new Policy(); // see if permissions already exist, and find ArnLike conditions boolean matchingConditionFound = false; boolean policyEdited = false; for (Statement statement : policy.getStatements()) { logger.info("statement id: {}, effect: {}, action: {}, resources:{}", statement.getId(), statement.getEffect().name(), statement.getActions().get(0).getActionName(), statement.getResources().get(0).getId()); // must be Allow effect if (! statement.getEffect().name().equals(Statement.Effect.Allow.name())) { continue; } // must be SendMessage action boolean actionFound = false; for (Action action : statement.getActions()) { // do lower case comparison, since UI adds SQS.SendMessage but SDK uses sqs.SendMessage if (action.getActionName().toLowerCase().equals(SQSActions.SendMessage.getActionName().toLowerCase())) { actionFound = true; break; } } if (!actionFound) { continue; } // must be same queue resource boolean queueResourceFound = false; for (Resource resource : statement.getResources()) { if (resource.getId().equals(queueARN)) { queueResourceFound = true; break; } } if (!queueResourceFound) { continue; } // found matching statement, check conditions for source ARN for (Condition condition : statement.getConditions()) { if (logger.isTraceEnabled()) { logger.trace("condition type: {}, conditionKey: {}", condition.getType(), condition.getConditionKey()); } if (condition.getType().equals(ArnCondition.ArnComparisonType.ArnLike.name()) && condition.getConditionKey().equals(ConditionFactory.SOURCE_ARN_CONDITION_KEY)) { matchingConditionFound = true; for (String topicARN : topicARNs) { if (! condition.getValues().contains(topicARN)) { // topic doesn't exist, add it policyEdited = true; condition.getValues().add(topicARN); } } } } } if (!matchingConditionFound) { // never found ArnLike SourceArn condition, need to add a statement List<Condition> conditions = new ArrayList<>(); for (String topicARN : topicARNs) { conditions.add(ConditionFactory.newSourceArnCondition(topicARN)); } Statement statement = new Statement(Statement.Effect.Allow) .withPrincipals(Principal.AllUsers) .withActions(SQSActions.SendMessage) .withResources(new Resource(queueARN)); statement.setConditions(conditions); policy.getStatements().add(statement); policyEdited = true; } if (policyEdited) { sqsAttributeMap.put(QueueAttributeName.Policy.toString(), policy.toJson()); // log if permissions are being updated logger.info("updating permissions for queueARN: {}, new policy: {}", queueARN, policy.toJson()); SetQueueAttributesRequest setQueueAttributesRequest = new SetQueueAttributesRequest(queueUrl, sqsAttributeMap); try { sqs.setQueueAttributes(setQueueAttributesRequest); } catch (Exception e) { logger.error("Failed to set permissions on QUEUE ARN=[{}] for TOPIC ARNs=[{}]", queueARN, topicARNs.toString(), e); } } } public static String getQueueArnByName( final AmazonSQSClient sqs, final String queueName ) throws Exception { String queueUrl = null; try { GetQueueUrlResult result = sqs.getQueueUrl( queueName ); queueUrl = result.getQueueUrl(); } catch ( QueueDoesNotExistException queueDoesNotExistException ) { //no op, swallow logger.warn( "Queue {} does not exist", queueName ); return null; } catch ( Exception e ) { logger.error( "Failed to get URL for Queue [{}] from SQS", queueName, e ); throw e; } if ( queueUrl != null ) { try { GetQueueAttributesRequest queueAttributesRequest = new GetQueueAttributesRequest( queueUrl ).withAttributeNames( "All" ); GetQueueAttributesResult queueAttributesResult = 
sqs.getQueueAttributes( queueAttributesRequest ); Map<String, String> sqsAttributeMap = queueAttributesResult.getAttributes(); return sqsAttributeMap.get( "QueueArn" ); } catch ( Exception e ) { logger.error( "Failed to get queue URL from service", e ); throw e; } } return null; } public static String getQueueArnByUrl( final AmazonSQSClient sqs, final String queueUrl ) throws Exception { try { GetQueueAttributesRequest queueAttributesRequest = new GetQueueAttributesRequest( queueUrl ).withAttributeNames( "All" ); GetQueueAttributesResult queueAttributesResult = sqs.getQueueAttributes( queueAttributesRequest ); Map<String, String> sqsAttributeMap = queueAttributesResult.getAttributes(); return sqsAttributeMap.get( "QueueArn" ); } catch ( Exception e ) { logger.error( "Failed to get queue URL from service", e ); throw e; } } public static String getTopicArn( final AmazonSNSClient sns, final String queueName, final boolean createOnMissing ) throws Exception { if ( logger.isTraceEnabled() ) { logger.trace( "Looking up Topic ARN: {}", queueName ); } ListTopicsResult listTopicsResult = sns.listTopics(); String topicArn = null; for ( Topic topic : listTopicsResult.getTopics() ) { String arn = topic.getTopicArn(); if ( queueName.equals( arn.substring( arn.lastIndexOf( ':' ) ) ) ) { topicArn = arn; if (logger.isTraceEnabled()) { logger.trace( "Found existing topic arn=[{}] for queue=[{}]", topicArn, queueName ); } } } if ( topicArn == null && createOnMissing ) { if (logger.isTraceEnabled()) { logger.trace("Creating topic for queue=[{}]...", queueName); } CreateTopicResult createTopicResult = sns.createTopic( queueName ); topicArn = createTopicResult.getTopicArn(); if (logger.isTraceEnabled()) { logger.trace("Successfully created topic with name {} and arn {}", queueName, topicArn); } } else { logger.error( "Error looking up topic ARN for queue=[{}] and createOnMissing=[{}]", queueName, createOnMissing ); } if ( logger.isTraceEnabled() ) { logger.trace( "Returning Topic ARN=[{}] for Queue=[{}]", topicArn, queueName ); } return topicArn; } public static String getQueueUrlByName( final AmazonSQSClient sqs, final String queueName ) { try { GetQueueUrlResult result = sqs.getQueueUrl( queueName ); return result.getQueueUrl(); } catch ( QueueDoesNotExistException e ) { //no op, return null logger.error( "Queue {} does not exist", queueName ); return null; } catch ( Exception e ) { logger.error( "failed to get queue from service", e ); throw e; } } }
5,233
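The Usergrid utility above bundles queue creation (with a dead-letter queue), ARN lookup and permission wiring. A small, hypothetical call site is sketched below; it only invokes methods defined in that class, and the helper name plus the idea of creating the queue on demand are assumptions, not behavior the original class provides for you.

import com.amazonaws.services.sqs.AmazonSQSClient;

import org.apache.usergrid.persistence.queue.LegacyQueueFig;
import org.apache.usergrid.persistence.queue.util.AmazonNotificationUtils;

// Sketch: resolve a queue ARN, creating the queue (and its dead-letter queue) if it is missing.
public class QueueBootstrapSketch {

    public static String ensureQueueArn(AmazonSQSClient sqs, LegacyQueueFig fig, String queueName)
            throws Exception {
        String url = AmazonNotificationUtils.getQueueUrlByName(sqs, queueName);
        if (url == null) {
            // createQueue also provisions the "<name>_dead" queue and the redrive policy
            url = AmazonNotificationUtils.createQueue(sqs, queueName, fig);
        }
        return AmazonNotificationUtils.getQueueArnByUrl(sqs, url);
    }
}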
1,936
<gh_stars>1000+ #ifndef ROVIOLI_IMU_CAMERA_SYNCHRONIZER_FLOW_H_ #define ROVIOLI_IMU_CAMERA_SYNCHRONIZER_FLOW_H_ #include <aslam/cameras/ncamera.h> #include <message-flow/message-flow.h> #include <sensors/imu.h> #include <vio-common/vio-types.h> #include "rovioli/flow-topics.h" #include "rovioli/imu-camera-synchronizer.h" namespace rovioli { class ImuCameraSynchronizerFlow { public: explicit ImuCameraSynchronizerFlow(const aslam::NCamera::Ptr& camera_system) : synchronizing_pipeline_(camera_system) { CHECK(camera_system); } ~ImuCameraSynchronizerFlow() { shutdown(); } void attachToMessageFlow(message_flow::MessageFlow* flow) { CHECK_NOTNULL(flow); static constexpr char kSubscriberNodeName[] = "ImuCameraSynchronizerFlow"; // Image input. flow->registerSubscriber<message_flow_topics::IMAGE_MEASUREMENTS>( kSubscriberNodeName, message_flow::DeliveryOptions(), [this](const vio::ImageMeasurement::Ptr& image) { CHECK(image); this->synchronizing_pipeline_.addCameraImage( image->camera_index, image->image, image->timestamp); }); // IMU input. flow->registerSubscriber<message_flow_topics::IMU_MEASUREMENTS>( kSubscriberNodeName, message_flow::DeliveryOptions(), [this](const vio::ImuMeasurement::Ptr& imu) { CHECK(imu); // TODO(schneith): This seems inefficient. Should we batch IMU // measurements on the datasource side? this->synchronizing_pipeline_.addImuMeasurements( (Eigen::Matrix<int64_t, 1, 1>() << imu->timestamp).finished(), imu->imu_data); }); // Tracked nframes and IMU output. synchronizing_pipeline_.registerSynchronizedNFrameImuCallback( flow->registerPublisher<message_flow_topics::SYNCED_NFRAMES_AND_IMU>()); } void shutdown() { synchronizing_pipeline_.shutdown(); } private: ImuCameraSynchronizer synchronizing_pipeline_; }; } // namespace rovioli #endif // ROVIOLI_IMU_CAMERA_SYNCHRONIZER_FLOW_H_
869
322
<reponame>mindThomas/acados /* This file was automatically generated by CasADi. The CasADi copyright holders make no ownership claim of its contents. */ #ifdef __cplusplus extern "C" { #endif /* How to prefix internal symbols */ #ifdef CODEGEN_PREFIX #define NAMESPACE_CONCAT(NS, ID) _NAMESPACE_CONCAT(NS, ID) #define _NAMESPACE_CONCAT(NS, ID) NS ## ID #define CASADI_PREFIX(ID) NAMESPACE_CONCAT(CODEGEN_PREFIX, ID) #else #define CASADI_PREFIX(ID) pendulum_ode_impl_ode_jac_x_xdot_u_z_ ## ID #endif #include <math.h> #ifndef casadi_real #define casadi_real double #endif #ifndef casadi_int #define casadi_int int #endif /* Add prefix to internal symbols */ #define casadi_f0 CASADI_PREFIX(f0) #define casadi_s0 CASADI_PREFIX(s0) #define casadi_s1 CASADI_PREFIX(s1) #define casadi_s2 CASADI_PREFIX(s2) #define casadi_s3 CASADI_PREFIX(s3) #define casadi_s4 CASADI_PREFIX(s4) #define casadi_s5 CASADI_PREFIX(s5) #define casadi_s6 CASADI_PREFIX(s6) /* Symbol visibility in DLLs */ #ifndef CASADI_SYMBOL_EXPORT #if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) #if defined(STATIC_LINKED) #define CASADI_SYMBOL_EXPORT #else #define CASADI_SYMBOL_EXPORT __declspec(dllexport) #endif #elif defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) #define CASADI_SYMBOL_EXPORT __attribute__ ((visibility ("default"))) #else #define CASADI_SYMBOL_EXPORT #endif #endif static const casadi_int casadi_s0[8] = {4, 1, 0, 4, 0, 1, 2, 3}; static const casadi_int casadi_s1[5] = {1, 1, 0, 1, 0}; static const casadi_int casadi_s2[3] = {0, 0, 0}; static const casadi_int casadi_s3[13] = {4, 4, 0, 0, 1, 3, 6, 0, 1, 3, 1, 2, 3}; static const casadi_int casadi_s4[11] = {4, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3}; static const casadi_int casadi_s5[6] = {4, 1, 0, 2, 1, 3}; static const casadi_int casadi_s6[3] = {4, 0, 0}; /* pendulum_ode_impl_ode_jac_x_xdot_u_z:(i0[4],i1[4],i2,i3[])->(o0[4x4,6nz],o1[4x4,4nz],o2[4x1,2nz],o3[4x0]) */ static int casadi_f0(const casadi_real** arg, casadi_real** res, casadi_int* iw, casadi_real* w, void* mem) { casadi_real a0, a1, a10, a11, a12, a13, a14, a15, a2, a3, a4, a5, a6, a7, a8, a9; a0=-1.; if (res[0]!=0) res[0][0]=a0; a1=arg[0] ? arg[0][3] : 0; a2=-8.0000000000000016e-02; a3=arg[0] ? arg[0][2] : 0; a4=cos(a3); a4=(a2*a4); a4=(a1*a4); a4=(a1*a4); a5=9.8100000000000009e-01; a6=cos(a3); a6=(a5*a6); a7=cos(a3); a7=(a6*a7); a8=sin(a3); a9=sin(a3); a5=(a5*a9); a5=(a8*a5); a7=(a7-a5); a4=(a4+a7); a7=1.1000000000000001e+00; a5=1.0000000000000001e-01; a9=cos(a3); a9=(a5*a9); a10=cos(a3); a11=(a9*a10); a7=(a7-a11); a4=(a4/a7); a11=sin(a3); a11=(a2*a11); a12=(a11*a1); a13=(a12*a1); a6=(a6*a8); a13=(a13+a6); a6=arg[2] ? 
arg[2][0] : 0; a13=(a13+a6); a13=(a13/a7); a13=(a13/a7); a8=sin(a3); a5=(a5*a8); a10=(a10*a5); a5=sin(a3); a9=(a9*a5); a10=(a10+a9); a13=(a13*a10); a4=(a4-a13); a4=(-a4); if (res[0]!=0) res[0][1]=a4; a4=cos(a3); a4=(a2*a4); a13=cos(a3); a13=(a4*a13); a9=sin(a3); a5=sin(a3); a2=(a2*a5); a2=(a9*a2); a13=(a13-a2); a13=(a1*a13); a13=(a1*a13); a2=sin(a3); a2=(a6*a2); a13=(a13-a2); a2=1.0791000000000002e+01; a5=cos(a3); a5=(a2*a5); a13=(a13+a5); a5=8.0000000000000004e-01; a8=(a5*a7); a13=(a13/a8); a4=(a4*a9); a9=(a4*a1); a14=(a9*a1); a15=cos(a3); a6=(a6*a15); a14=(a14+a6); a3=sin(a3); a2=(a2*a3); a14=(a14+a2); a14=(a14/a8); a14=(a14/a8); a5=(a5*a10); a14=(a14*a5); a13=(a13-a14); a13=(-a13); if (res[0]!=0) res[0][2]=a13; a11=(a1*a11); a11=(a11+a12); a11=(a11/a7); a11=(-a11); if (res[0]!=0) res[0][3]=a11; if (res[0]!=0) res[0][4]=a0; a1=(a1*a4); a1=(a1+a9); a1=(a1/a8); a1=(-a1); if (res[0]!=0) res[0][5]=a1; a1=1.; if (res[1]!=0) res[1][0]=a1; if (res[1]!=0) res[1][1]=a1; if (res[1]!=0) res[1][2]=a1; if (res[1]!=0) res[1][3]=a1; a7=(1./a7); a7=(-a7); if (res[2]!=0) res[2][0]=a7; a15=(a15/a8); a15=(-a15); if (res[2]!=0) res[2][1]=a15; return 0; } CASADI_SYMBOL_EXPORT int pendulum_ode_impl_ode_jac_x_xdot_u_z(const casadi_real** arg, casadi_real** res, casadi_int* iw, casadi_real* w, void* mem){ return casadi_f0(arg, res, iw, w, mem); } CASADI_SYMBOL_EXPORT void pendulum_ode_impl_ode_jac_x_xdot_u_z_incref(void) { } CASADI_SYMBOL_EXPORT void pendulum_ode_impl_ode_jac_x_xdot_u_z_decref(void) { } CASADI_SYMBOL_EXPORT casadi_int pendulum_ode_impl_ode_jac_x_xdot_u_z_n_in(void) { return 4;} CASADI_SYMBOL_EXPORT casadi_int pendulum_ode_impl_ode_jac_x_xdot_u_z_n_out(void) { return 4;} CASADI_SYMBOL_EXPORT const char* pendulum_ode_impl_ode_jac_x_xdot_u_z_name_in(casadi_int i){ switch (i) { case 0: return "i0"; case 1: return "i1"; case 2: return "i2"; case 3: return "i3"; default: return 0; } } CASADI_SYMBOL_EXPORT const char* pendulum_ode_impl_ode_jac_x_xdot_u_z_name_out(casadi_int i){ switch (i) { case 0: return "o0"; case 1: return "o1"; case 2: return "o2"; case 3: return "o3"; default: return 0; } } CASADI_SYMBOL_EXPORT const casadi_int* pendulum_ode_impl_ode_jac_x_xdot_u_z_sparsity_in(casadi_int i) { switch (i) { case 0: return casadi_s0; case 1: return casadi_s0; case 2: return casadi_s1; case 3: return casadi_s2; default: return 0; } } CASADI_SYMBOL_EXPORT const casadi_int* pendulum_ode_impl_ode_jac_x_xdot_u_z_sparsity_out(casadi_int i) { switch (i) { case 0: return casadi_s3; case 1: return casadi_s4; case 2: return casadi_s5; case 3: return casadi_s6; default: return 0; } } CASADI_SYMBOL_EXPORT int pendulum_ode_impl_ode_jac_x_xdot_u_z_work(casadi_int *sz_arg, casadi_int* sz_res, casadi_int *sz_iw, casadi_int *sz_w) { if (sz_arg) *sz_arg = 4; if (sz_res) *sz_res = 4; if (sz_iw) *sz_iw = 0; if (sz_w) *sz_w = 0; return 0; } #ifdef __cplusplus } /* extern "C" */ #endif
3,254
891
<filename>tests/data/expected/main/main_jsonschema_external_files/output.py # generated by datamodel-codegen: # filename: external_parent_root.json # timestamp: 2019-07-26T00:00:00+00:00 from __future__ import annotations from typing import Optional from pydantic import BaseModel class ExternalChildRoot(BaseModel): somefield: Optional[int] = None class Object(BaseModel): metadata: ExternalChildRoot
136
3,101
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dtstack.flinkx.connector.kafka.source; import com.dtstack.flinkx.restore.FormatState; import org.apache.flink.api.common.eventtime.WatermarkStrategy; import org.apache.flink.api.common.functions.RuntimeContext; import org.apache.flink.api.common.state.CheckpointListener; import org.apache.flink.api.common.state.ListState; import org.apache.flink.api.common.state.ListStateDescriptor; import org.apache.flink.api.common.state.OperatorStateStore; import org.apache.flink.api.common.typeinfo.TypeHint; import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.api.java.typeutils.ResultTypeQueryable; import org.apache.flink.configuration.Configuration; import org.apache.flink.runtime.state.FunctionInitializationContext; import org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction; import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase; import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition; import org.apache.flink.table.data.RowData; import com.esotericsoftware.kryo.Kryo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.regex.Pattern; /** * @author chuixue * @create 2021-05-07 14:46 * @description */ public class KafkaConsumerWrapper extends RichParallelSourceFunction<RowData> implements CheckpointListener, ResultTypeQueryable<RowData>, CheckpointedFunction { protected static final Logger LOG = LoggerFactory.getLogger(KafkaConsumerWrapper.class); private static final long serialVersionUID = 1L; private static final String LOCATION_STATE_NAME = "data-sync-location-states"; private final FlinkKafkaConsumerBase<RowData> flinkKafkaConsumer; private final DynamicKafkaDeserializationSchema deserializationSchema; private final Properties props; private transient ListState<FormatState> unionOffsetStates; private Map<Integer, FormatState> formatStateMap; public KafkaConsumerWrapper( List<String> topics, DynamicKafkaDeserializationSchema deserializer, Properties props) { Properties originalProps = new Kryo().copy(props); flinkKafkaConsumer = new FlinkKafkaConsumer<>(topics, deserializer, props, originalProps, deserializer); this.deserializationSchema = deserializer; this.props = originalProps; } public KafkaConsumerWrapper( Pattern subscriptionPattern, DynamicKafkaDeserializationSchema deserializer, Properties props) { Properties originalProps = new Kryo().copy(props); flinkKafkaConsumer = new FlinkKafkaConsumer<>( subscriptionPattern, deserializer, props, originalProps, 
deserializer); this.deserializationSchema = deserializer; this.props = originalProps; } @Override public void setRuntimeContext(RuntimeContext t) { super.setRuntimeContext(t); flinkKafkaConsumer.setRuntimeContext(t); } @Override public void open(Configuration configuration) throws Exception { deserializationSchema.setRuntimeContext(getRuntimeContext()); deserializationSchema.setConsumerConfig(props); if (formatStateMap != null) { deserializationSchema.setFormatState( formatStateMap.get(getRuntimeContext().getIndexOfThisSubtask())); } flinkKafkaConsumer.open(configuration); } @Override public void snapshotState(FunctionSnapshotContext context) throws Exception { flinkKafkaConsumer.snapshotState(context); FormatState formatState = deserializationSchema.getFormatState(); if (formatState != null) { LOG.info("InputFormat format state:{}", formatState); unionOffsetStates.clear(); unionOffsetStates.add(formatState); } } @Override public void initializeState(FunctionInitializationContext context) throws Exception { flinkKafkaConsumer.initializeState(context); OperatorStateStore stateStore = context.getOperatorStateStore(); LOG.info("Start initialize input format state, is restored:{}", context.isRestored()); unionOffsetStates = stateStore.getUnionListState( new ListStateDescriptor<>( LOCATION_STATE_NAME, TypeInformation.of(new TypeHint<FormatState>() {}))); if (context.isRestored()) { formatStateMap = new HashMap<>(16); for (FormatState formatState : unionOffsetStates.get()) { formatStateMap.put(formatState.getNumOfSubTask(), formatState); LOG.info("Input format state into:{}", formatState); } } LOG.info("End initialize input format state"); } @Override public void close() throws Exception { flinkKafkaConsumer.close(); deserializationSchema.close(); } @Override public void notifyCheckpointComplete(long checkpointId) throws Exception { flinkKafkaConsumer.notifyCheckpointComplete(checkpointId); } @Override public TypeInformation<RowData> getProducedType() { return flinkKafkaConsumer.getProducedType(); } @Override public void run(SourceContext<RowData> ctx) throws Exception { flinkKafkaConsumer.run(ctx); } @Override public void cancel() { flinkKafkaConsumer.cancel(); } public void setStartFromEarliest() { flinkKafkaConsumer.setStartFromEarliest(); } public void setStartFromLatest() { flinkKafkaConsumer.setStartFromLatest(); } public void setStartFromGroupOffsets() { flinkKafkaConsumer.setStartFromGroupOffsets(); } public void setStartFromSpecificOffsets(Map<KafkaTopicPartition, Long> specificStartupOffsets) { flinkKafkaConsumer.setStartFromSpecificOffsets(specificStartupOffsets); } public void setStartFromTimestamp(long startupOffsetsTimestamp) { flinkKafkaConsumer.setStartFromTimestamp(startupOffsetsTimestamp); } public void setCommitOffsetsOnCheckpoints(boolean commitOnCheckpoints) { flinkKafkaConsumer.setCommitOffsetsOnCheckpoints(commitOnCheckpoints); } public void assignTimestampsAndWatermarks(WatermarkStrategy<RowData> watermarkStrategy) { flinkKafkaConsumer.assignTimestampsAndWatermarks(watermarkStrategy); } }
2,701
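KafkaConsumerWrapper above mostly delegates to an embedded FlinkKafkaConsumer while persisting FlinkX FormatState. The sketch below shows a plausible way a caller tunes an already-built wrapper; constructing one needs a DynamicKafkaDeserializationSchema, which is outside this record, so the instance is taken as a parameter, and the noWatermarks() choice is an illustrative assumption.

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.table.data.RowData;

import com.dtstack.flinkx.connector.kafka.source.KafkaConsumerWrapper;

// Sketch: hypothetical call site configuring the wrapper's startup and checkpoint behaviour.
public class KafkaSourceSetupSketch {

    public static void configure(KafkaConsumerWrapper consumer) {
        consumer.setStartFromEarliest();              // replay the topic from the beginning
        consumer.setCommitOffsetsOnCheckpoints(true); // commit offsets only on completed checkpoints
        consumer.assignTimestampsAndWatermarks(WatermarkStrategy.<RowData>noWatermarks());
    }
}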
2,047
<reponame>sivchand/smart_open # -*- coding: utf-8 -*- # # Copyright (C) 2019 <NAME> <<EMAIL>> # # This code is distributed under the terms and conditions # from the MIT License (MIT). # import logging import boto3 from smart_open import open # # These are publicly available via play.min.io # KEY_ID = 'Q<KEY>' SECRET_KEY = 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' ENDPOINT_URL = 'https://play.min.io:9000' def read_boto3(): """Read directly using boto3.""" session = get_minio_session() s3 = session.resource('s3', endpoint_url=ENDPOINT_URL) obj = s3.Object('smart-open-test', 'README.rst') data = obj.get()['Body'].read() logging.info('read %d bytes via boto3', len(data)) return data def read_smart_open(): url = 's3://Q3AM3UQ867SPQQA43P2F:[email protected]:9000@smart-open-test/README.rst' # noqa # # If the default region is not us-east-1, we need to construct our own # session. This is because smart_open will create a session in the default # region, which _must_ be us-east-1 for minio to work. # tp = {} if get_default_region() != 'us-east-1': logging.info('injecting custom session') tp['session'] = get_minio_session() with open(url, transport_params=tp) as fin: text = fin.read() logging.info('read %d characters via smart_open', len(text)) return text def get_minio_session(): return boto3.Session( region_name='us-east-1', aws_access_key_id=KEY_ID, aws_secret_access_key=SECRET_KEY, ) def get_default_region(): return boto3.Session().region_name def main(): logging.basicConfig(level=logging.INFO) from_boto3 = read_boto3() from_smart_open = read_smart_open() assert from_boto3.decode('utf-8') == from_smart_open if __name__ == '__main__': main()
794
557
<reponame>cockcrow/python-mammoth from nose.tools import istest, assert_equal from mammoth.styles.parser.tokeniser import Token, TokenType from mammoth.styles.parser.token_parser import decode_escape_sequences, parse_identifier, parse_string from mammoth.styles.parser.token_iterator import TokenIterator @istest def escape_sequences_in_identifiers_are_decoded(): assert_equal( ":", parse_identifier(TokenIterator([ Token(0, TokenType.IDENTIFIER, r"\:"), ])), ) @istest def escape_sequences_in_strings_are_decoded(): assert_equal( "\n", parse_string(TokenIterator([ Token(0, TokenType.STRING, r"'\n'"), ])), ) @istest def line_feeds_are_decoded(): assert_equal("\n", decode_escape_sequences(r"\n")) @istest def carriage_returns_are_decoded(): assert_equal("\r", decode_escape_sequences(r"\r")) @istest def tabs_are_decoded(): assert_equal("\t", decode_escape_sequences(r"\t")) @istest def backslashes_are_decoded(): assert_equal("\\", decode_escape_sequences(r"\\")) @istest def colons_are_decoded(): assert_equal(":", decode_escape_sequences(r"\:"))
472
1,333
package org.xujin.moss.model.trace; import java.io.Serializable; public class TopologyResult implements Serializable { private TopologyData data; public TopologyData getData() { return data; } public void setData(TopologyData data) { this.data = data; } }
109
1,194
<reponame>mbw-ahc/hapi-fhir package ca.uhn.fhir.jpa.interceptor.ex; import ca.uhn.fhir.interceptor.api.Hook; import ca.uhn.fhir.interceptor.api.Interceptor; import ca.uhn.fhir.interceptor.api.Pointcut; import ca.uhn.fhir.interceptor.model.RequestPartitionId; // This class is replicated in PartitionExamples.java -- Keep it up to date there too!! @Interceptor public class PartitionInterceptorReadAllPartitions { @Hook(Pointcut.STORAGE_PARTITION_IDENTIFY_READ) public RequestPartitionId readPartition() { return RequestPartitionId.allPartitions(); } }
207
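The HAPI FHIR interceptor above answers the STORAGE_PARTITION_IDENTIFY_READ pointcut with "all partitions". As a hedged sketch (obtaining the IInterceptorService, typically via Spring injection, is assumed and not shown), activating it usually amounts to a single registration call:

import ca.uhn.fhir.interceptor.api.IInterceptorService;
import ca.uhn.fhir.jpa.interceptor.ex.PartitionInterceptorReadAllPartitions;

// Sketch: register the interceptor so every read request is resolved against all partitions.
public class PartitionInterceptorRegistrationSketch {

    public static void register(IInterceptorService interceptorService) {
        interceptorService.registerInterceptor(new PartitionInterceptorReadAllPartitions());
    }
}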
930
<reponame>zhenchai/pigeon<gh_stars>100-1000 package com.dianping.pigeon.remoting.common.codec.fst; import java.io.IOException; import java.math.BigInteger; import de.ruedigermoeller.serialization.FSTBasicObjectSerializer; import de.ruedigermoeller.serialization.FSTClazzInfo; import de.ruedigermoeller.serialization.FSTClazzInfo.FSTFieldInfo; import de.ruedigermoeller.serialization.FSTObjectInput; import de.ruedigermoeller.serialization.FSTObjectOutput; public class FSTBigIntegerSerializer extends FSTBasicObjectSerializer { @Override public void writeObject(FSTObjectOutput out, Object toWrite, FSTClazzInfo clzInfo, FSTFieldInfo referencedBy, int streamPosition) throws IOException { byte[] value = ((BigInteger) toWrite).toByteArray(); out.writeInt(value.length); out.write(value); } @Override public Object instantiate(Class objectClass, FSTObjectInput in, FSTClazzInfo serializationInfo, FSTFieldInfo referencee, int streamPosition) throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException { byte[] buf = new byte[in.readInt()]; in.read(buf); BigInteger result = new BigInteger(buf); in.registerObject(result, streamPosition, serializationInfo, referencee); return result; } }
402
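The Pigeon serializer above encodes a BigInteger as a length-prefixed byte array for FST. The sketch below registers it on an FSTConfiguration and round-trips a value; it assumes the same pre-nustaq FST package used in the record, and registerSerializer's exact signature may differ between FST versions.

import java.math.BigInteger;

import de.ruedigermoeller.serialization.FSTConfiguration;

import com.dianping.pigeon.remoting.common.codec.fst.FSTBigIntegerSerializer;

// Sketch: wire the custom serializer into an FST configuration and round-trip a value.
public class FstRegistrationSketch {

    private static final FSTConfiguration CONF = FSTConfiguration.createDefaultConfiguration();

    static {
        // false: do not apply the serializer to subclasses of BigInteger
        CONF.registerSerializer(BigInteger.class, new FSTBigIntegerSerializer(), false);
    }

    public static byte[] write(BigInteger value) {
        return CONF.asByteArray(value);
    }

    public static BigInteger read(byte[] bytes) {
        return (BigInteger) CONF.asObject(bytes);
    }
}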
2,151
//===-- DomPrinter.h - Dom printer external interface ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines external functions that can be called to explicitly // instantiate the dominance tree printer. // //===----------------------------------------------------------------------===// #ifndef LLVM_ANALYSIS_DOMPRINTER_H #define LLVM_ANALYSIS_DOMPRINTER_H namespace llvm { class FunctionPass; FunctionPass *createDomPrinterPass(); FunctionPass *createDomOnlyPrinterPass(); FunctionPass *createDomViewerPass(); FunctionPass *createDomOnlyViewerPass(); FunctionPass *createPostDomPrinterPass(); FunctionPass *createPostDomOnlyPrinterPass(); FunctionPass *createPostDomViewerPass(); FunctionPass *createPostDomOnlyViewerPass(); } // End llvm namespace #endif
280
937
# -*- coding: utf-8 -*-
import scrapy


class RenrenSpider(scrapy.Spider):
    """
    Send a POST request using FormRequest.
    """
    name = 'renren'
    allowed_domains = ['renren.com']
    start_urls = ['http://www.renren.com/PLogin.do']

    # Sending a POST request requires overriding start_requests; without
    # overriding it, the default start_requests method issues GET requests.
    def start_requests(self):
        url = self.start_urls[0]
        post_data = {
            'email': '18949599846',
            'password': '<PASSWORD>'
        }
        # Send the POST request
        yield scrapy.FormRequest(url=url, formdata=post_data, callback=self.parse)

    def parse(self, response):
        # Save the response data of the POST request
        with open('renren.html', 'wb') as f:
            f.write(response.body)

409
1,521
/** * Copyright 2020 Alibaba Group Holding Limited. * * <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of the License at * * <p>http://www.apache.org/licenses/LICENSE-2.0 * * <p>Unless required by applicable law or agreed to in writing, software distributed under the * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.alibaba.maxgraph.tinkerpop.steps; import org.apache.tinkerpop.gremlin.process.computer.Memory; import org.apache.tinkerpop.gremlin.process.computer.VertexProgram; import org.apache.tinkerpop.gremlin.process.computer.traversal.step.map.VertexProgramStep; import org.apache.tinkerpop.gremlin.process.traversal.Traversal; import org.apache.tinkerpop.gremlin.process.traversal.step.Configuring; import org.apache.tinkerpop.gremlin.process.traversal.step.TraversalParent; import org.apache.tinkerpop.gremlin.process.traversal.step.util.Parameters; import org.apache.tinkerpop.gremlin.process.traversal.util.PureTraversal; import org.apache.tinkerpop.gremlin.structure.Edge; import org.apache.tinkerpop.gremlin.structure.Graph; import org.apache.tinkerpop.gremlin.structure.Vertex; import java.util.Collections; import java.util.List; public class LpaVertexProgramStep extends VertexProgramStep implements TraversalParent, Configuring { public static final String TARGET_LABEL = "label"; public static final String SEED_LABEL = "id"; public static final int MAX_ITERATION = 20; private Parameters parameters = new Parameters(); private int maxIterations = MAX_ITERATION; private String property = TARGET_LABEL; private String label = SEED_LABEL; private PureTraversal<Vertex, Edge> edgeTraversal; public LpaVertexProgramStep(Traversal.Admin traversal) { super(traversal); } public String getProperty() { return property; } public int getMaxIterations() { return maxIterations; } public String getLabel() { return label; } @Override public List<Traversal.Admin<Vertex, Edge>> getLocalChildren() { return Collections.singletonList(this.edgeTraversal.get()); } @Override public void configure(Object... keyValues) { Object key = keyValues[0]; Object value = keyValues[1]; if (key.equals(Lpa.PROPERTY_NAME)) { if (!(value instanceof String)) { throw new IllegalArgumentException("property requires a String as its argument"); } this.property = (String) value; } else if (key.equals(Lpa.SEED_PROPERTY)) { if (!(value instanceof String)) { throw new IllegalArgumentException("label requires a String as its argument"); } this.label = (String) value; } else if (key.equals(Lpa.TIMES)) { if (!(value instanceof Integer)) { throw new IllegalArgumentException("times requires an Integer as its argument"); } this.maxIterations = (int) value; } if (key.equals(Lpa.EDGES)) { if (!(value instanceof Traversal)) { throw new IllegalArgumentException( "edges in LPA requires an String as its argument"); } this.edgeTraversal = new PureTraversal<>(((Traversal<Vertex, Edge>) value).asAdmin()); this.integrateChild(this.edgeTraversal.get()); } else { this.parameters.set(this, keyValues); } } @Override public Parameters getParameters() { return parameters; } @Override public VertexProgram generateProgram(Graph graph, Memory memory) { throw new IllegalArgumentException(); } }
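A sketch of how this step's configure() keys fit together, under the assumption that the surrounding MaxGraph/TinkerPop context supplies the parent traversal and that the Lpa constants referenced above are visible (e.g. the helper lives in the same package). The property names and iteration count are illustrative.

import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;

public class LpaVertexProgramStepSketch {
    // Hypothetical helper: the Traversal.Admin is the traversal this step is added to.
    public static LpaVertexProgramStep configuredStep(Traversal.Admin traversal) {
        LpaVertexProgramStep step = new LpaVertexProgramStep(traversal);
        step.configure(Lpa.PROPERTY_NAME, "community"); // vertex property that receives the final label
        step.configure(Lpa.SEED_PROPERTY, "id");        // property used to seed the initial labels
        step.configure(Lpa.TIMES, 10);                  // maximum number of LPA iterations
        step.configure(Lpa.EDGES, __.bothE());          // edges along which labels propagate
        return step;
    }
}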
1,491
578
/* * Tencent is pleased to support the open source community by making BK-JOB蓝鲸智云作业平台 available. * * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. * * BK-JOB蓝鲸智云作业平台 is licensed under the MIT License. * * License for BK-JOB蓝鲸智云作业平台: * -------------------------------------------------------------------- * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and * to permit persons to whom the Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial portions of * the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO * THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ package com.tencent.bk.job.manage.dao.globalsetting.impl; import com.tencent.bk.job.manage.dao.globalsetting.DangerousRuleDAO; import org.jooq.DSLContext; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.TestPropertySource; import org.springframework.test.context.jdbc.Sql; import org.springframework.test.context.jdbc.SqlConfig; import org.springframework.test.context.junit.jupiter.SpringExtension; import static org.assertj.core.api.Assertions.assertThat; /** * @since 8/12/2020 11:56 */ @ExtendWith(SpringExtension.class) @SpringBootTest @ActiveProfiles("test") @TestPropertySource(locations = "classpath:test.properties") @Sql(value = {"/init_dangerous_rule_data.sql"}) @SqlConfig(encoding = "utf-8") class DangerousRuleDAOImplIntegrationTest { @Autowired private DangerousRuleDAO dangerousRuleDAO; @Autowired private DSLContext dslContext; @Test void listDangerousRulesByScriptType() { assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 1)).hasSize(5); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 2)).hasSize(4); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 3)).hasSize(3); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 4)).hasSize(2); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 5)).hasSize(1); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 6)).hasSize(0); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 7)).hasSize(0); assertThat(dangerousRuleDAO.listDangerousRulesByScriptType(dslContext, 8)).hasSize(0); } }
1,114
902
/* fullbench.c - Demo program to benchmark open-source compression algorithm Copyright (C) <NAME> 2012-2015 GPL v2 License This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. You can contact the author at : - public forum : https://groups.google.com/forum/#!forum/lz4c - website : http://fastcompression.blogspot.com/ */ /*===== Compiler's specifics =====*/ #define _CRT_SECURE_NO_WARNINGS /* Remove Visual warning */ /*_************************************ * Includes **************************************/ #include <stdlib.h> /* malloc */ #include <stdio.h> /* fprintf, fopen, ftello64 */ #include <string.h> /* strcmp */ #include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */ #include "cpu.h" /* ZSTD_cpuid_bmi2, ZSTD_cpuid */ #include "mem.h" #include "hist.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" #define HUF_STATIC_LINKING_ONLY #include "huf.h" #include "xxhash.h" /*_************************************ * Constants **************************************/ #define PROGRAM_DESCRIPTION "FSE speed analyzer" #ifndef FSE_VERSION # define FSE_VERSION "" #endif #define AUTHOR "<NAME>" #define WELCOME_MESSAGE "*** %s %s %i-bits, by %s (%s) ***\n", PROGRAM_DESCRIPTION, FSE_VERSION, (int)(sizeof(void*)*8), AUTHOR, __DATE__ #define NBLOOPS 6 #define TIMELOOP_S 2 #define TIMELOOP (TIMELOOP_S * CLOCKS_PER_SEC) #define BENCHCLOCK_MIN (CLOCKS_PER_SEC / 4) #define PROBATABLESIZE 2048 #define KB *(1<<10) #define MB *(1<<20) #define GB *(1<<30) #define PRIME1 2654435761U #define PRIME2 2246822519U #define DEFAULT_BLOCKSIZE (32 KB) #define DEFAULT_PROBA 20 /*_************************************ * Macros ***************************************/ #define DISPLAY(...) fprintf(stderr, __VA_ARGS__) #define PROGRESS(...) no_prompt ? 0 : DISPLAY(__VA_ARGS__) /*_************************************ * Benchmark Parameters ***************************************/ static U32 no_prompt = 0; /*_******************************************************* * Private functions **********************************************************/ static clock_t BMK_clockSpan( clock_t clockStart ) { return clock() - clockStart; /* works even if overflow, span limited to <= ~30mn */ } static U32 BMK_rand (U32* seed) { *seed = ( (*seed) * PRIME1) + PRIME2; return (*seed) >> 11; } static void BMK_genData(void* buffer, size_t buffSize, double p) { char table[PROBATABLESIZE] = {0}; int remaining = PROBATABLESIZE; unsigned pos = 0; unsigned s = 0; char* op = (char*) buffer; char* oend = op + buffSize; unsigned seed = 1; static unsigned done = 0; if (p<0.01) p = 0.005; if (p>1.) 
p = 1.; if (!done) { done = 1; DISPLAY("Generating %i KB with P=%.2f%%\n", (int)(buffSize >> 10), p*100); } /* Build Table */ while (remaining) { unsigned n = (unsigned)(remaining * p); unsigned end; if (!n) n=1; end = pos + n; while (pos<end) table[pos++]=(char)s; s++; if (s==255) s=0; /* for compatibility with count254 test */ remaining -= n; } /* Fill buffer */ while (op<oend) { const unsigned r = BMK_rand(&seed) & (PROBATABLESIZE-1); *op++ = table[r]; } } /*_******************************************************* * Benchmark function **********************************************************/ static int local_trivialCount(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 count[256] = {0}; const BYTE* ip = (const BYTE*)src; const BYTE* const end = ip + srcSize; (void)dst; (void)dstSize; while (ip<end) count[*ip++]++; return (int)count[ip[-1]]; } static int local_count8(void* dst, size_t dstSize, const void* src, size_t srcSize) { #define NBT 8 U32 count[NBT][256]; const BYTE* ip = (const BYTE*)src; const BYTE* const end = ip + srcSize - (NBT-1); (void)dst; (void)dstSize; memset(count, 0, sizeof(count)); while (ip<end) { unsigned idx; for (idx=0; idx<NBT; idx++) count[idx][*ip++]++; } { unsigned n; for (n=0; n<256; n++) { unsigned idx; for (idx=1; idx<NBT; idx++) count[0][n] += count[idx][n]; } } return (int)count[0][ip[-1]]; } /* U64 version */ static int local_count8v2(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 count[8][256+16]; const U64* ptr = (const U64*) src; const U64* end = ptr + (srcSize >> 3); U64 next = *ptr++; (void)dst; (void)dstSize; memset(count, 0, sizeof(count)); while (ptr != end) { register U64 bs = next; next = *ptr++; count[ 0][(BYTE)bs] ++; count[ 1][(BYTE)(bs>>8)] ++; count[ 2][(BYTE)(bs>>16)] ++; count[ 3][(BYTE)(bs>>24)] ++; count[ 4][(BYTE)(bs>>32)] ++; count[ 5][(BYTE)(bs>>40)] ++; count[ 6][(BYTE)(bs>>48)] ++; count[ 7][(BYTE)(bs>>56)] ++; } { unsigned u; for (u = 0; u < 256; u++) { unsigned idx; for (idx=1; idx<8; idx++) count[0][u] += count[idx][u]; } } return count[0][0]; } /* hist_X_Y function from https://github.com/powturbo/turbohist */ static int local_hist_4_32(void* dst, size_t dstSize, const void* src, size_t srcSize) { //#define NU 8 #define NU 16 int i; U32 count[256]={0}; U32 c0[256]={0},c1[256]={0},c2[256]={0},c3[256]={0}; const U32* ip32 = (const U32*)src; const U32* const ip32end = ip32 + (srcSize >> 2); const BYTE* ip = (const BYTE*)src; const BYTE* const iend = ip + srcSize; U32 cp = *ip32; (void)dst; (void)dstSize; for(; ip32 != ip32end; ) { U32 c = cp; ip32++; cp = *ip32; c0[(unsigned char)c ]++; c1[(unsigned char)(c>>8) ]++; c2[(unsigned char)(c>>16)]++; c3[c>>24 ]++; c = cp; ip32++; cp = *ip32; c0[(unsigned char)c ]++; c1[(unsigned char)(c>>8) ]++; c2[(unsigned char)(c>>16)]++; c3[c>>24 ]++; #if NU == 16 c = cp; ip32++; cp = *ip32; c0[(unsigned char)c ]++; c1[(unsigned char)(c>>8) ]++; c2[(unsigned char)(c>>16)]++; c3[c>>24 ]++; c = cp; ip32++; cp = *ip32; c0[(unsigned char)c ]++; c1[(unsigned char)(c>>8) ]++; c2[(unsigned char)(c>>16)]++; c3[c>>24 ]++; #endif } ip = (const BYTE*)ip32; while(ip < iend) c0[*ip++]++; for(i = 0; i < 256; i++) count[i] = c0[i]+c1[i]+c2[i]+c3[i]; return count[0]; } static int local_hist_4_32v2(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 c0[256]={0},c1[256]={0},c2[256]={0},c3[256]={0}; const U32* ip32 = (const U32*)src; const U32* const ip32end = ip32 + (srcSize>>2); const BYTE* ip = (const BYTE*)src; const BYTE* const iend = ip + srcSize; U32 cp = 
*ip32; int i; (void)dst; (void)dstSize; while (ip32 <= ip32end-4) { U32 c = cp, d = *(++ip32); cp = *(++ip32); c0[(BYTE) c ]++; c1[(BYTE) d ]++; c2[(BYTE)(c>>8)]++; c>>=16; c3[(BYTE)(d>>8)]++; d>>=16; c0[(BYTE) c ]++; c1[(BYTE) d ]++; c2[ c>>8 ]++; c3[ d>>8 ]++; c = cp; d = *(++ip32); cp = *(++ip32); c0[(BYTE) c ]++; c1[(BYTE) d ]++; c2[(BYTE)(c>>8)]++; c>>=16; c3[(BYTE)(d>>8)]++; d>>=16; c0[(BYTE) c ]++; c1[(BYTE) d ]++; c2[ c>>8 ]++; c3[ d>>8 ]++; } ip = (const BYTE*)ip32; while(ip < iend) c0[*ip++]++; for(i = 0; i < 256; i++) c0[i] += c1[i]+c2[i]+c3[i]; return c0[0]; } #define PAD 8 static int local_hist_8_32(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 c0[256+PAD]={0},c1[256+PAD]={0},c2[256+PAD]={0},c3[256+PAD]={0},c4[256+PAD]={0},c5[256+PAD]={0},c6[256+PAD]={0},c7[256+PAD]={0}; const U32* ip32 = (const U32*)src; const U32* const ip32end = ip32 + (srcSize >> 2); const BYTE* ip = (const BYTE*)src; const BYTE* const iend = (const BYTE*)src + srcSize; U32 cp = *(const U32*)src; int i; (void)dst; (void)dstSize; while( ip32 <= ip32end - 4 ) { U32 c = cp, d = *(++ip32); cp = *(++ip32); c0[(unsigned char) c ]++; c1[(unsigned char) d ]++; c2[(unsigned char)(c>>8)]++; c>>=16; c3[(unsigned char)(d>>8)]++; d>>=16; c4[(unsigned char) c ]++; c5[(unsigned char) d ]++; c6[ c>>8 ]++; c7[ d>>8 ]++; c = cp; d = *(++ip32); cp = *(++ip32); c0[(unsigned char) c ]++; c1[(unsigned char) d ]++; c2[(unsigned char)(c>>8)]++; c>>=16; c3[(unsigned char)(d>>8)]++; d>>=16; c4[(unsigned char) c ]++; c5[(unsigned char) d ]++; c6[ c>>8 ]++; c7[ d>>8 ]++; } ip = (const BYTE*) ip32; while(ip < iend) c0[*ip++]++; for(i = 0; i < 256; i++) c0[i] += c1[i]+c2[i]+c3[i]+c4[i]+c5[i]+c6[i]+c7[i]; return c0[0]; } /* Modified version of count2x64 by <NAME>, using C instead of assembler */ #define C_INC_TABLES(src0, src1, count, i) \ { \ U64 byte0 = src0 & 0xFF;\ U64 byte1 = src1 & 0xFF;\ U64 byte2 = (src0 & 0xFF00) >> 8; \ U64 byte3 = (src1 & 0xFF00) >> 8; \ count[i+0][byte0]++;\ count[i+1][byte1]++;\ count[i+2][byte2]++; \ count[i+3][byte3]++; \ } #define COUNT_SIZE (256+16) static int local_count2x64v2(void* dst, size_t dstSize, const void* src0, size_t srcSize) { const U64* src64 = (const U64*)src0; const U64* src64end = src64 + (srcSize>>3); const BYTE* src = (const BYTE*)src0; U64 remainder = srcSize; U64 next0, next1; U32 count[16][COUNT_SIZE]; (void)dst; (void)dstSize; memset(count, 0, sizeof(count)); if (srcSize < 32) goto handle_remainder; remainder = srcSize % 16; next0 = src64[0]; next1 = src64[1]; while (src64 != src64end) { U64 data0 = next0; U64 data1 = next1; src64 += 2; next0 = src64[0]; next1 = src64[1]; C_INC_TABLES(data0, data1, count, 0); data0 >>= 16; data1 >>= 16; C_INC_TABLES(data0, data1, count, 0); data0 >>= 16; data1 >>= 16; C_INC_TABLES(data0, data1, count, 0); data0 >>= 16; data1 >>= 16; C_INC_TABLES(data0, data1, count, 0); } handle_remainder: { size_t i; for (i = 0; i < remainder; i++) { size_t byte = src[i]; count[0][byte]++; } for (i = 0; i < 256; i++) { int idx; for (idx=1; idx < 16; idx++) { count[0][i] += count[idx][i]; } } } return count[0][0]; } static void histo_by8(U32* counts, const BYTE* rawArray, size_t rawLen) { U32 countsArray[4][256]; memset(countsArray,0,sizeof(countsArray)); const BYTE* rawPtr = rawArray; const BYTE* const rawEnd = rawArray+rawLen; const BYTE* rawEndMul4 = rawArray+(rawLen&~3); while(rawPtr < rawEndMul4) { U64 x = MEM_read64(rawPtr); countsArray[0][x & 0xff]++; x >>= 8; countsArray[1][x & 0xff]++; x >>= 8; countsArray[2][x & 0xff]++; x >>= 8; 
countsArray[3][x & 0xff]++; x >>= 8; countsArray[0][x & 0xff]++; x >>= 8; countsArray[1][x & 0xff]++; x >>= 8; countsArray[2][x & 0xff]++; x >>= 8; countsArray[3][x] ++; // last one doesn't need to mask rawPtr += 8; } // finish the last few bytes (just throw them into array 0, doesn't matter) while(rawPtr < rawEnd) countsArray[0][ *rawPtr++ ] ++; // sum the countsarrays together { U32 s; for (s=0; s<256; s++) { counts[s] = countsArray[0][s] + countsArray[1][s] + countsArray[2][s] + countsArray[3][s]; } } } static int local_histo_by8(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 count[256]; (void)dst;(void)dstSize; histo_by8(count, (const BYTE*)src, srcSize); return count[0]; } static int local_FSE_count255(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 count[256]; U32 max = 255; (void)dst; (void)dstSize; return (int)HIST_count(count, &max, (const BYTE*)src, (U32)srcSize); } static int local_FSE_count254(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 count[256]; U32 max = 254; (void)dst; (void)dstSize; return (int)HIST_count(count, &max, (const BYTE*)src, (U32)srcSize); } static int local_FSE_countFast254(void* dst, size_t dstSize, const void* src, size_t srcSize) { U32 count[256]; U32 max = 254; (void)dst; (void)dstSize; return (int)HIST_countFast(count, &max, (const unsigned char*)src, srcSize); } static int local_FSE_compress(void* dst, size_t dstSize, const void* src, size_t srcSize) { return (int)FSE_compress(dst, dstSize, src, srcSize); } static int local_HUF_compress(void* dst, size_t dstSize, const void* src, size_t srcSize) { return (int)HUF_compress(dst, dstSize, src, srcSize); } static U32 fakeTree[256]; static void* const g_treeVoidPtr = fakeTree; static HUF_CElt* g_tree; static short g_normTable[256]; static U32 g_countTable[256]; static U32 g_tableLog; static U32 g_CTable[2350]; static U32 g_DTable[FSE_DTABLE_SIZE_U32(12)]; static U32 g_max; static U32 g_bmi2 = 0; static size_t g_skip; static size_t g_cSize; static size_t g_oSize; #define DTABLE_LOG 12 HUF_CREATE_STATIC_DTABLEX2(g_huff_dtable, DTABLE_LOG); static void BMK_init(void) { g_tree = (HUF_CElt*) g_treeVoidPtr; g_bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); } static int local_HUF_buildCTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; (void)src; (void)srcSize; return (int)HUF_buildCTable(g_tree, g_countTable, g_max, 0); } static int local_HUF_writeCTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)src; (void)srcSize; return (int)HUF_writeCTable(dst, dstSize, g_tree, g_max, g_tableLog); } static int local_HUF_readCTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; unsigned max = 255; unsigned hasZeros = 0; return (int)HUF_readCTable(g_tree, &max, src, srcSize, &hasZeros); } static int local_HUF_compress4x_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { return (int)HUF_compress4X_usingCTable(dst, dstSize, src, srcSize, g_tree); } static unsigned huf4x_wksp[HUF_WORKSPACE_SIZE_U32]; static int local_HUF_compress4x_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize) { HUF_repeat repeat = HUF_repeat_valid; return (int)HUF_compress4X_repeat(dst, dstSize, src, srcSize, g_max, g_tableLog, huf4x_wksp, sizeof(huf4x_wksp), g_tree, &repeat, 1, g_bmi2); } static int local_FSE_normalizeCount(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; (void)src; return 
(int)FSE_normalizeCount(g_normTable, 0, g_countTable, srcSize, g_max); } static int local_FSE_writeNCount(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)src; (void)srcSize; return (int)FSE_writeNCount(dst, dstSize, g_normTable, g_max, g_tableLog); } /* static int local_FSE_writeHeader_small(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)src; (void)srcSize; (void)dstSize; return FSE_writeHeader(dst, 500, g_normTable, 255, g_tableLog); } */ static int local_FSE_buildCTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; (void)src; (void)srcSize; return (int)FSE_buildCTable(g_CTable, g_normTable, g_max, g_tableLog); } static int local_FSE_buildCTable_raw(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; (void)src; (void)srcSize; return (int)FSE_buildCTable_raw(g_CTable, 6); } static int local_FSE_compress_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { return (int)FSE_compress_usingCTable(dst, dstSize, src, srcSize, g_CTable); } static int local_FSE_compress_usingCTable_tooSmall(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dstSize; return (int)FSE_compress_usingCTable(dst, FSE_BLOCKBOUND(srcSize)-1, src, srcSize, g_CTable); } static int local_FSE_readNCount(void* src, size_t srcSize, const void* initialBuffer, size_t initialBufferSize) { short norm[256]; (void)initialBuffer; (void)initialBufferSize; return (int)FSE_readNCount(norm, &g_max, &g_tableLog, src, srcSize); } static int local_FSE_buildDTable(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; (void)src; (void)srcSize; return (int)FSE_buildDTable(g_DTable, g_normTable, g_max, g_tableLog); } static int local_FSE_buildDTable_raw(void* dst, size_t dstSize, const void* src, size_t srcSize) { (void)dst; (void)dstSize; (void)src; (void)srcSize; return (int)FSE_buildDTable_raw(g_DTable, 6); } static int local_FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; return (int)FSE_decompress_usingDTable(dst, maxDstSize, (const BYTE*)src + g_skip, g_cSize, g_DTable); } static int local_FSE_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; return (int)FSE_decompress(dst, maxDstSize, src, g_cSize); } static int local_HUF_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress(dst, g_oSize, src, g_cSize); } static int local_HUF_decompress4X1(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress4X1(dst, g_oSize, src, g_cSize); } static int local_HUF_decompress4X2(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress4X2(dst, g_oSize, src, g_cSize); } static int local_HUF_decompress1X1(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress1X1(dst, g_oSize, src, g_cSize); } static int local_HUF_decompress1X2(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress1X2(dst, g_oSize, src, g_cSize); } static int local_HUF_readStats(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { BYTE weights[HUF_SYMBOLVALUE_MAX+1]; U32 ranks[HUF_TABLELOG_ABSOLUTEMAX+1]; U32 nbSymbols=0, tableLog=0; (void)dst; 
(void)maxDstSize; (void)srcSize; return (int)HUF_readStats(weights, HUF_SYMBOLVALUE_MAX+1, ranks, &nbSymbols, &tableLog, src, g_cSize); } static int local_HUF_readDTableX2(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)dst; (void)maxDstSize; (void)srcSize; return (int)HUF_readDTableX2(g_huff_dtable, src, g_cSize); } static int local_HUF_readDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return local_HUF_readDTableX2(dst, maxDstSize, src, srcSize); } static int local_HUF_readDTableX1(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)dst; (void)maxDstSize; (void)srcSize; return (int)HUF_readDTableX1(g_huff_dtable, src, g_cSize); } static int local_HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress4X1_usingDTable(dst, g_oSize, src, g_cSize, g_huff_dtable); } static int local_HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress4X2_usingDTable(dst, g_oSize, src, g_cSize, g_huff_dtable); } static int local_HUF_decompress_usingDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { return local_HUF_decompress4X2_usingDTable(dst, maxDstSize, src, srcSize); } static int local_HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress4X_usingDTable_bmi2(dst, g_oSize, src, g_cSize, g_huff_dtable, g_bmi2); } static int local_HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress1X1_usingDTable(dst, g_oSize, src, g_cSize, g_huff_dtable); } static int local_HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress1X2_usingDTable(dst, g_oSize, src, g_cSize, g_huff_dtable); } static int local_HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* src, size_t srcSize) { (void)srcSize; (void)maxDstSize; return (int)HUF_decompress1X_usingDTable_bmi2(dst, g_oSize, src, g_cSize, g_huff_dtable, g_bmi2); } int runBench(const void* buffer, size_t blockSize, U32 algNb, U32 nbBenchs) { size_t benchedSize = blockSize; size_t cBuffSize = FSE_compressBound((unsigned)benchedSize); void* oBuffer = malloc(blockSize); void* cBuffer = malloc(cBuffSize); const char* funcName; int (*func)(void* dst, size_t dstSize, const void* src, size_t srcSize); /* Init */ memcpy(oBuffer, buffer, blockSize); /* Bench selection */ switch (algNb) { case 1: funcName = "HIST_count(255)"; func = local_FSE_count255; break; case 2: funcName = "HIST_count(254)"; func = local_FSE_count254; break; case 3: funcName = "HIST_countFast(254)"; func = local_FSE_countFast254; break; case 4: { g_max=255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = FSE_optimalTableLog(g_tableLog, benchedSize, g_max); funcName = "FSE_normalizeCount"; func = local_FSE_normalizeCount; break; } case 5: { g_max=255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = FSE_optimalTableLog(g_tableLog, benchedSize, g_max); FSE_normalizeCount(g_normTable, g_tableLog, g_countTable, benchedSize, g_max); funcName = "FSE_writeNCount"; func = local_FSE_writeNCount; break; } case 6: { g_max=255; 
HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = FSE_optimalTableLog(g_tableLog, benchedSize, g_max); FSE_normalizeCount(g_normTable, g_tableLog, g_countTable, benchedSize, g_max); funcName = "FSE_buildCTable"; func = local_FSE_buildCTable; break; } case 7: { U32 max=255; HIST_count(g_countTable, &max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)FSE_normalizeCount(g_normTable, g_tableLog, g_countTable, benchedSize, max); FSE_buildCTable(g_CTable, g_normTable, max, g_tableLog); funcName = "FSE_compress_usingCTable"; func = local_FSE_compress_usingCTable; break; } case 8: { U32 max=255; HIST_count(g_countTable, &max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)FSE_normalizeCount(g_normTable, g_tableLog, g_countTable, benchedSize, max); FSE_buildCTable(g_CTable, g_normTable, max, g_tableLog); funcName = "FSE_compress_usingCTable_smallDst"; func = local_FSE_compress_usingCTable_tooSmall; break; } case 9: funcName = "FSE_compress"; func = local_FSE_compress; break; case 11: { FSE_compress(cBuffer, cBuffSize, oBuffer, benchedSize); g_max = 255; funcName = "FSE_readNCount"; func = local_FSE_readNCount; break; } case 12: { FSE_compress(cBuffer, cBuffSize, oBuffer, benchedSize); g_max = 255; FSE_readNCount(g_normTable, &g_max, &g_tableLog, cBuffer, benchedSize); funcName = "FSE_buildDTable"; func = local_FSE_buildDTable; break; } case 13: { g_cSize = FSE_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); g_max = 255; g_skip = FSE_readNCount(g_normTable, &g_max, &g_tableLog, oBuffer, g_cSize); g_cSize -= g_skip; FSE_buildDTable (g_DTable, g_normTable, g_max, g_tableLog); funcName = "FSE_decompress_usingDTable"; func = local_FSE_decompress_usingDTable; break; } case 14: { g_cSize = FSE_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "FSE_decompress"; func = local_FSE_decompress; break; } case 20: funcName = "HUF_compress"; func = local_HUF_compress; break; case 21: { g_max=255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); funcName = "HUF_buildCTable"; func = local_HUF_buildCTable; break; } case 22: { g_max=255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); funcName = "HUF_writeCTable"; func = local_HUF_writeCTable; break; } case 23: { g_max=255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); funcName = "HUF_compress4x_usingCTable"; func = local_HUF_compress4x_usingCTable; break; } case 24: { g_max=255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); funcName = "HUF_compress4x_usingCTable_bmi2"; func = local_HUF_compress4x_usingCTable_bmi2; break; } case 25: { g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_readCTable"; func = local_HUF_readCTable; break; } case 30: { g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_decompress"; func = local_HUF_decompress; break; } case 31: { g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_readStats"; func = local_HUF_readStats; break; } case 32: { g_cSize = 
HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_readDTable"; func = local_HUF_readDTable; break; } case 33: { size_t hSize; g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); hSize = HUF_readDTableX2(g_huff_dtable, cBuffer, g_cSize); g_cSize -= hSize; memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress_usingDTable"; func = local_HUF_decompress_usingDTable; break; } case 40: { g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_decompress4X1"; func = local_HUF_decompress4X1; break; } case 41: { g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_readDTableX1"; func = local_HUF_readDTableX1; break; } case 42: { size_t hSize; g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); hSize = HUF_readDTableX1(g_huff_dtable, cBuffer, g_cSize); g_cSize -= hSize; memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress4X1_usingDTable"; func = local_HUF_decompress4X1_usingDTable; break; } case 43: { size_t hSize; g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); hSize = HUF_readDTableX1(g_huff_dtable, cBuffer, g_cSize); g_cSize -= hSize; memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress4X1_usingDTable_bmi2"; func = local_HUF_decompress4X_usingDTable_bmi2; break; } case 45: { g_oSize = benchedSize; g_max = 255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); g_cSize = HUF_writeCTable(cBuffer, cBuffSize, g_tree, g_max, g_tableLog); g_cSize += HUF_compress1X_usingCTable(((BYTE*)cBuffer) + g_cSize, cBuffSize, oBuffer, benchedSize, g_tree); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_decompress1X1"; func = local_HUF_decompress1X1; break; } case 46: { size_t hSize; g_oSize = benchedSize; g_max = 255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); hSize = HUF_writeCTable(cBuffer, cBuffSize, g_tree, g_max, g_tableLog); g_cSize = HUF_compress1X_usingCTable(((BYTE*)cBuffer) + hSize, cBuffSize, oBuffer, benchedSize, g_tree); hSize = HUF_readDTableX1(g_huff_dtable, cBuffer, g_cSize); memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress1X1_usingDTable"; func = local_HUF_decompress1X1_usingDTable; break; } case 47: { size_t hSize; g_oSize = benchedSize; g_max = 255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); hSize = HUF_writeCTable(cBuffer, cBuffSize, g_tree, g_max, g_tableLog); g_cSize = HUF_compress1X_usingCTable(((BYTE*)cBuffer) + hSize, cBuffSize, oBuffer, benchedSize, g_tree); hSize = HUF_readDTableX1(g_huff_dtable, cBuffer, g_cSize); memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress1X1_usingDTable_bmi2"; func = local_HUF_decompress1X_usingDTable_bmi2; break; } case 50: { g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_decompress4X2"; func = local_HUF_decompress4X2; break; } case 51: { g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); memcpy(oBuffer, cBuffer, g_cSize); 
funcName = "HUF_readDTableX2"; func = local_HUF_readDTableX2; break; } case 52: { size_t hSize; g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); hSize = HUF_readDTableX2(g_huff_dtable, cBuffer, g_cSize); g_cSize -= hSize; memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress4X2_usingDTable"; func = local_HUF_decompress4X2_usingDTable; break; } case 53: { size_t hSize; g_oSize = benchedSize; g_cSize = HUF_compress(cBuffer, cBuffSize, oBuffer, benchedSize); hSize = HUF_readDTableX2(g_huff_dtable, cBuffer, g_cSize); g_cSize -= hSize; memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress4X2_usingDTable_bmi2"; func = local_HUF_decompress4X_usingDTable_bmi2; break; } case 55: { g_oSize = benchedSize; g_max = 255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); g_cSize = HUF_writeCTable(cBuffer, cBuffSize, g_tree, g_max, g_tableLog); g_cSize += HUF_compress1X_usingCTable(((BYTE*)cBuffer) + g_cSize, cBuffSize, oBuffer, benchedSize, g_tree); memcpy(oBuffer, cBuffer, g_cSize); funcName = "HUF_decompress1X2"; func = local_HUF_decompress1X2; break; } case 56: { size_t hSize; g_oSize = benchedSize; g_max = 255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); hSize = HUF_writeCTable(cBuffer, cBuffSize, g_tree, g_max, g_tableLog); g_cSize = HUF_compress1X_usingCTable(((BYTE*)cBuffer) + hSize, cBuffSize, oBuffer, benchedSize, g_tree); hSize = HUF_readDTableX2(g_huff_dtable, cBuffer, g_cSize); memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress1X2_usingDTable"; func = local_HUF_decompress1X2_usingDTable; break; } case 57: { size_t hSize; g_oSize = benchedSize; g_max = 255; HIST_count(g_countTable, &g_max, (const unsigned char*)oBuffer, benchedSize); g_tableLog = (U32)HUF_buildCTable(g_tree, g_countTable, g_max, 0); hSize = HUF_writeCTable(cBuffer, cBuffSize, g_tree, g_max, g_tableLog); g_cSize = HUF_compress1X_usingCTable(((BYTE*)cBuffer) + hSize, cBuffSize, oBuffer, benchedSize, g_tree); hSize = HUF_readDTableX2(g_huff_dtable, cBuffer, g_cSize); memcpy(oBuffer, ((char*)cBuffer)+hSize, g_cSize); funcName = "HUF_decompress1X2_usingDTable_bmi2"; func = local_HUF_decompress1X_usingDTable_bmi2; break; } case 70: { funcName = "FSE_buildCTable_raw(6)"; func = local_FSE_buildCTable_raw; break; } case 80: { g_max=255; HIST_count(g_countTable, &g_max, oBuffer, benchedSize); g_tableLog = FSE_optimalTableLog(10, benchedSize, g_max); FSE_normalizeCount(g_normTable, g_tableLog, g_countTable, benchedSize, g_max); funcName = "FSE_buildDTable(10)"; func = local_FSE_buildDTable; break; } case 81: { g_max=255; HIST_count(g_countTable, &g_max, oBuffer, benchedSize); g_tableLog = FSE_optimalTableLog(9, benchedSize, g_max); FSE_normalizeCount(g_normTable, g_tableLog, g_countTable, benchedSize, g_max); funcName = "FSE_buildDTable(9)"; func = local_FSE_buildDTable; break; } case 82: { funcName = "FSE_buildDTable_raw(6)"; func = local_FSE_buildDTable_raw; break; } /* Specific test functions */ case 100: funcName = "trivialCount"; func = local_trivialCount; break; case 101: funcName = "count8"; func = local_count8; break; case 102: funcName = "count8v2"; func = local_count8v2; break; case 103: funcName = "local_hist_4_32"; func = local_hist_4_32; break; case 104: funcName = "local_hist_4_32v2"; func = local_hist_4_32v2; break; case 
105: funcName = "local_hist_8_32"; func = local_hist_8_32; break; case 106: funcName = "local_count2x64v2"; func = local_count2x64v2; break; case 107: funcName = "local_histo_by8"; func = local_histo_by8; break; default: goto _end; } /* Bench */ DISPLAY("\r%79s\r", ""); { int nbLoops = ((100 MB) / (benchedSize+1)) + 1; /* initial speed evaluation */ double bestTimeS = 999.; U32 benchNb=1; DISPLAY("%2u-%-34.34s : \r", benchNb, funcName); for (benchNb=1; benchNb <= nbBenchs; benchNb++) { size_t resultCode = 0; clock_t clockStart = clock(); while(clock() == clockStart); /* wait beginning of next tick */ clockStart = clock(); { int loopNb; for (loopNb=0; loopNb < nbLoops; loopNb++) { resultCode = func(cBuffer, cBuffSize, oBuffer, benchedSize); } } { clock_t const benchClock = BMK_clockSpan(clockStart); double const averageTimeS = (double)benchClock / nbLoops / CLOCKS_PER_SEC; if (benchClock > 0) { assert(averageTimeS != 0.0); nbLoops = (U32)(1. / averageTimeS) + 1; /*aim for 1sec*/ } else { assert(nbLoops < 20000000); /* avoid overflow */ nbLoops *= 100; } if (benchClock < BENCHCLOCK_MIN) { assert(benchNb > 0); benchNb--; continue; } if (averageTimeS < bestTimeS) bestTimeS = averageTimeS; DISPLAY("%2u-%-34.34s : %8.1f MB/s (%6u) \r", benchNb+1, funcName, (double)benchedSize / (1 MB) / bestTimeS, (U32)resultCode); } } DISPLAY("%2u#\n", algNb); } _end: free(oBuffer); free(cBuffer); return 0; } static int fullbench(const char* filename, double p, size_t blockSize, U32 algNb, U32 nbLoops) { int result = 0; void* buffer = malloc(blockSize); if (filename==NULL) BMK_genData(buffer, blockSize, p); else { FILE* f = fopen( filename, "rb" ); if (f==NULL) { DISPLAY( "Pb opening %s\n", filename); return 11; } blockSize = fread(buffer, 1, blockSize, f); DISPLAY("Loading %u bytes from %s \n", (U32)blockSize, filename); fclose(f); } if (algNb==0) { U32 u; for (u=1; u<=99; u++) result += runBench(buffer, blockSize, u, nbLoops); } else result = runBench(buffer, blockSize, algNb, nbLoops); free(buffer); return result; } static int benchMultipleFiles(const char** fnTable, int nbFn, int startFn, double p, size_t blockSize, U32 algNb, U32 nbLoops) { if (startFn==0) return fullbench(NULL, p, blockSize, algNb, nbLoops); { int i, result=0; for (i=startFn; i<nbFn; i++) result += fullbench(fnTable[i], p, blockSize, algNb, nbLoops); return result; } } static int usage(const char* exename) { DISPLAY( "Usage :\n"); DISPLAY( " %s [arg] [filename]\n", exename); DISPLAY( "Arguments :\n"); DISPLAY( " -b# : select function to benchmark (default : 0 == all)\n"); DISPLAY( " -H/-h : Help (this text + advanced options)\n"); return 0; } static int usage_advanced(const char* exename) { usage(exename); DISPLAY( "\nAdvanced options :\n"); DISPLAY( " -i# : iteration loops [1-9] (default : %i)\n", NBLOOPS); DISPLAY( " -B# : block size, in bytes (default : %i)\n", DEFAULT_BLOCKSIZE); DISPLAY( " -P# : probability curve, in %% (default : %i%%)\n", DEFAULT_PROBA); return 0; } static int badusage(const char* exename) { DISPLAY("Wrong parameters\n"); usage(exename); return 1; } int main(int argc, const char** argv) { const char* exename = argv[0]; U32 proba = DEFAULT_PROBA; U32 nbLoops = NBLOOPS; U32 pause = 0; U32 algNb = 0; U32 blockSize = DEFAULT_BLOCKSIZE; int i; int result; int fnStart=0; BMK_init(); /* Welcome message */ DISPLAY(WELCOME_MESSAGE); if (argc<1) return badusage(exename); for(i=1; i<argc; i++) { const char* argument = argv[i]; if(!argument) continue; // Protection if argument empty if (!strcmp(argument, "--no-prompt")) { 
no_prompt = 1; continue; } // Decode command (note : aggregated commands are allowed) if (*argument=='-') { argument ++; while (*argument!=0) { switch(*argument) { case '-': // valid separator argument++; break; // Display help on usage case 'h' : case 'H': return usage_advanced(exename); // Select Algo nb case 'b': argument++; algNb=0; while ((*argument >='0') && (*argument <='9')) { algNb*=10; algNb += *argument++ - '0'; } break; // Modify Nb loops case 'i': argument++; nbLoops=0; while ((*argument >='0') && (*argument <='9')) { nbLoops*=10; nbLoops += *argument++ - '0'; } break; // Modify data probability case 'P': argument++; proba=0; while ((*argument >='0') && (*argument <='9')) { proba*=10; proba += *argument++ - '0'; } break; // Modify block size case 'B': argument++; blockSize=0; while ((*argument >='0') && (*argument <='9')) { blockSize*=10; blockSize += *argument++ - '0'; } if (argument[0]=='K') { blockSize<<=10; argument++; } /* allows using KB notation */ if (argument[0]=='M') { blockSize<<=20; argument++; } if (argument[0]=='B') argument++; break; // Pause at the end (hidden option) case 'p': pause=1; argument++; break; // Unknown command default : return badusage(exename); } } continue; } /* note : non-commands are filenames; all filenames should be at end of line */ if (fnStart==0) fnStart = i; } result = benchMultipleFiles(argv, argc, fnStart, (double)proba / 100, blockSize, algNb, nbLoops); if (pause) { DISPLAY("press enter...\n"); getchar(); } return result; }
22,997
585
<gh_stars>100-1000 /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.client.solrj.request.beans; import org.apache.solr.common.annotation.JsonProperty; import org.apache.solr.common.util.ReflectMapWriter; import java.util.List; import java.util.Map; public class CreateCorePayload implements ReflectMapWriter { @JsonProperty(required = true) public String name; @JsonProperty public String instanceDir; @JsonProperty public String dataDir; @JsonProperty public String ulogDir; @JsonProperty public String schema; @JsonProperty public String config; @JsonProperty public String configSet; @JsonProperty public Boolean loadOnStartup; // If our JsonProperty clone was more feature-rich here we could specify the property be called 'transient', but // without that support it needs to be named something else to avoid conflicting with the 'transient' keyword in Java @JsonProperty public Boolean isTransient; @JsonProperty public String shard; @JsonProperty public String collection; // TODO - what type is 'roles' expected to be? @JsonProperty public List<String> roles; @JsonProperty public String replicaType; @JsonProperty public Map<String, Object> properties; @JsonProperty public String coreNodeName; @JsonProperty public Integer numShards; @JsonProperty public Boolean newCollection; @JsonProperty public String async; }
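A small illustrative sketch of filling in this payload before it is serialized into a v2 core-admin create request; the field values are made up for the example.

public class CreateCorePayloadSketch {
    public static CreateCorePayload exampleCore() {
        CreateCorePayload payload = new CreateCorePayload();
        payload.name = "techproducts";   // the only required property
        payload.configSet = "_default";  // illustrative configset name
        payload.loadOnStartup = true;
        payload.isTransient = false;     // maps to the core's 'transient' flag
        return payload;
    }
}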
683
4,036
#if !defined(CODEQL_ITERATOR_H) #define CODEQL_ITERATOR_H typedef unsigned long size_t; #include "type_traits.h" namespace std { struct ptrdiff_t; template<class I> struct iterator_traits; template <class Category, class value_type, class difference_type = ptrdiff_t, class pointer_type = value_type*, class reference_type = value_type&> struct iterator { typedef Category iterator_category; iterator(); iterator(iterator<Category, remove_const_t<value_type> > const &other); // non-const -> const conversion constructor iterator &operator++(); iterator operator++(int); iterator &operator--(); iterator operator--(int); bool operator==(iterator other) const; bool operator!=(iterator other) const; reference_type operator*() const; pointer_type operator->() const; iterator operator+(int); iterator operator-(int); iterator &operator+=(int); iterator &operator-=(int); int operator-(iterator); reference_type operator[](int); }; struct input_iterator_tag {}; struct forward_iterator_tag : public input_iterator_tag {}; struct bidirectional_iterator_tag : public forward_iterator_tag {}; struct random_access_iterator_tag : public bidirectional_iterator_tag {}; struct output_iterator_tag {}; template<class Container> class back_insert_iterator { protected: Container* container = nullptr; public: using iterator_category = output_iterator_tag; using value_type = void; using difference_type = ptrdiff_t; using pointer = void; using reference = void; using container_type = Container; constexpr back_insert_iterator() noexcept = default; constexpr explicit back_insert_iterator(Container& x); back_insert_iterator& operator=(const typename Container::value_type& value); back_insert_iterator& operator=(typename Container::value_type&& value); back_insert_iterator& operator*(); back_insert_iterator& operator++(); back_insert_iterator operator++(int); }; template<class Container> constexpr back_insert_iterator<Container> back_inserter(Container& x) { return back_insert_iterator<Container>(x); } template<class Container> class front_insert_iterator { protected: Container* container = nullptr; public: using iterator_category = output_iterator_tag; using value_type = void; using difference_type = ptrdiff_t; using pointer = void; using reference = void; using container_type = Container; constexpr front_insert_iterator() noexcept = default; constexpr explicit front_insert_iterator(Container& x); constexpr front_insert_iterator& operator=(const typename Container::value_type& value); constexpr front_insert_iterator& operator=(typename Container::value_type&& value); constexpr front_insert_iterator& operator*(); constexpr front_insert_iterator& operator++(); constexpr front_insert_iterator operator++(int); }; template<class Container> constexpr front_insert_iterator<Container> front_inserter(Container& x) { return front_insert_iterator<Container>(x); } } #endif
941
2,782
package course.examples.graphics.shapedraw; import android.app.Activity; import android.graphics.Color; import android.graphics.drawable.ShapeDrawable; import android.graphics.drawable.shapes.OvalShape; import android.os.Bundle; import android.widget.ImageView; import android.widget.RelativeLayout; public class ShapeDrawActivity extends Activity { private final static int ALPHA = 127; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); int width = (int) getResources().getDimension(R.dimen.image_width); int height = (int) getResources().getDimension(R.dimen.image_height); int padding = (int) getResources().getDimension(R.dimen.padding); // Get container View RelativeLayout rl = findViewById(R.id.main_window); // Create Cyan Shape ShapeDrawable cyanShape = new ShapeDrawable(new OvalShape()); cyanShape.getPaint().setColor(Color.CYAN); cyanShape.setIntrinsicHeight(height); cyanShape.setIntrinsicWidth(width); cyanShape.setAlpha(ALPHA); // Put Cyan Shape into an ImageView ImageView cyanView = new ImageView(getApplicationContext()); cyanView.setImageDrawable(cyanShape); cyanView.setPadding(padding, padding, padding, padding); // Specify placement of ImageView within RelativeLayout RelativeLayout.LayoutParams cyanViewLayoutParams = new RelativeLayout.LayoutParams( height, width); cyanViewLayoutParams.addRule(RelativeLayout.CENTER_VERTICAL); cyanViewLayoutParams.addRule(RelativeLayout.ALIGN_PARENT_LEFT); cyanView.setLayoutParams(cyanViewLayoutParams); rl.addView(cyanView); // Create Magenta Shape ShapeDrawable magentaShape = new ShapeDrawable(new OvalShape()); magentaShape.getPaint().setColor(Color.MAGENTA); magentaShape.setIntrinsicHeight(height); magentaShape.setIntrinsicWidth(width); magentaShape.setAlpha(ALPHA); // Put Magenta Shape into an ImageView ImageView magentaView = new ImageView(getApplicationContext()); magentaView.setImageDrawable(magentaShape); magentaView.setPadding(padding, padding, padding, padding); // Specify placement of ImageView within RelativeLayout RelativeLayout.LayoutParams magentaViewLayoutParams = new RelativeLayout.LayoutParams( height, width); magentaViewLayoutParams.addRule(RelativeLayout.CENTER_VERTICAL); magentaViewLayoutParams.addRule(RelativeLayout.ALIGN_PARENT_RIGHT); magentaView.setLayoutParams(magentaViewLayoutParams); rl.addView(magentaView); } }
823
788
<reponame>afedyukova/libact
"""This module includes some functions to be reused in query strategy testing.
"""
import numpy as np


def run_qs(trn_ds, qs, truth, quota):
    """Run the query strategy on the specified dataset and return the querying
    sequence.

    Parameters
    ----------
    trn_ds : Dataset object
        The dataset to be run on.

    qs : QueryStrategy instance
        The active learning algorithm to be run.

    truth : array-like
        The true labels.

    quota : int
        Number of iterations to run.

    Returns
    -------
    qseq : numpy array, shape (quota,)
        The numpy array of entry_id representing the querying sequence.

    """
    ret = []
    for _ in range(quota):
        ask_id = qs.make_query()
        trn_ds.update(ask_id, truth[ask_id])
        ret.append(ask_id)
    return np.array(ret)
322
324
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jclouds.openstack.nova.v2_0.functions; import static org.testng.Assert.assertEquals; import java.net.URI; import java.util.Map; import com.google.common.base.Function; import com.google.common.base.Functions; import com.google.common.base.Supplier; import com.google.common.base.Suppliers; import com.google.inject.Injector; import org.jclouds.compute.domain.SecurityGroupBuilder; import org.jclouds.domain.Location; import org.jclouds.domain.LocationBuilder; import org.jclouds.domain.LocationScope; import org.jclouds.http.HttpRequest; import org.jclouds.http.HttpResponse; import org.jclouds.net.domain.IpPermission; import org.jclouds.net.domain.IpProtocol; import org.jclouds.openstack.nova.v2_0.NovaApi; import org.jclouds.openstack.nova.v2_0.compute.functions.CreateSecurityGroupIfNeeded; import org.jclouds.openstack.nova.v2_0.compute.functions.NeutronSecurityGroupToSecurityGroup; import org.jclouds.openstack.nova.v2_0.compute.functions.NovaSecurityGroupInRegionToSecurityGroup; import org.jclouds.openstack.nova.v2_0.domain.regionscoped.RegionSecurityGroupNameAndPorts; import org.jclouds.openstack.nova.v2_0.domain.regionscoped.SecurityGroupInRegion; import org.jclouds.openstack.nova.v2_0.internal.BaseNovaApiExpectTest; import org.testng.annotations.Test; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap.Builder; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.ImmutableSet; @Test(groups = "unit", testName = "CreateSecurityGroupIfNeededTest") public class CreateSecurityGroupIfNeededTest extends BaseNovaApiExpectTest { HttpRequest create = HttpRequest.builder().method("POST").endpoint( URI.create("https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v2/3456/os-security-groups")).headers( ImmutableMultimap.<String, String> builder().put("Accept", "application/json").put("X-Auth-Token", authToken).build()) .payload( payloadFromStringWithContentType( "{\"security_group\":{\"name\":\"jclouds_mygroup\",\"description\":\"jclouds_mygroup\"}}", "application/json")).build(); Location provider = new LocationBuilder().scope(LocationScope.PROVIDER).id("openstack-nova").description( "openstack-nova").build(); Location region = new LocationBuilder().id("az-1.region-a.geo-1").description("az-1.region-a.geo-1").scope( LocationScope.REGION).parent(provider).build(); Supplier<Map<String, Location>> locationIndex = Suppliers.<Map<String, Location>> ofInstance(ImmutableMap .<String, Location> of("az-1.region-a.geo-1", region)); Function<SecurityGroupInRegion, org.jclouds.compute.domain.SecurityGroup> securityGroupInRegionSecurityGroupFunction = new NovaSecurityGroupInRegionToSecurityGroup(locationIndex); Injector 
injector = createInjector(Functions.forMap(ImmutableMap.<HttpRequest, HttpResponse>of()), createModule(), setupProperties()); NeutronSecurityGroupToSecurityGroup.Factory factory = injector.getInstance(NeutronSecurityGroupToSecurityGroup.Factory.class); private final int groupId = 2769; public void testCreateNewGroup() throws Exception { Builder<HttpRequest, HttpResponse> builder = ImmutableMap.builder(); builder.put(keystoneAuthWithUsernameAndPasswordAndTenantName, responseWithKeystoneAccess); builder.put(extensionsOfNovaRequest, extensionsOfNovaResponse); HttpResponse createResponse = HttpResponse.builder().statusCode(200) .payload( payloadFromStringWithContentType( String.format("{\"security_group\": {\"rules\": [], \"tenant_id\": \"37936628937291\", \"id\": %s, \"name\": \"jclouds_mygroup\", \"description\": \"jclouds_mygroup\"}}", groupId), "application/json; charset=UTF-8")).build(); builder.put(create, createResponse); int ruleId = 10331; for (int port : ImmutableList.of(22, 8080)) { HttpRequest createCidrRule = HttpRequest.builder().method("POST").endpoint( URI.create("https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v2/3456/os-security-group-rules")).headers( ImmutableMultimap.<String, String> builder().put("Accept", "application/json").put("X-Auth-Token", authToken).build()) .payload( payloadFromStringWithContentType( String.format("{\"security_group_rule\":{\"parent_group_id\":\"%s\",\"cidr\":\"0.0.0.0/0\",\"ip_protocol\":\"tcp\",\"from_port\":\"%d\",\"to_port\":\"%d\"}}", groupId, port, port), "application/json")).build(); HttpResponse createCidrRuleResponse = HttpResponse.builder().statusCode(200) .payload( payloadFromStringWithContentType( String.format("{\"security_group_rule\": {\"from_port\": %d, \"group\": {}, \"ip_protocol\": \"tcp\", \"to_port\": %d, \"parent_group_id\": %d, \"ip_range\": {\"cidr\": \"0.0.0.0/0\"}, \"id\": %d}}", port, port, groupId, ruleId++), "application/json; charset=UTF-8")).build(); builder.put(createCidrRule, createCidrRuleResponse); HttpRequest createSelfRule = HttpRequest.builder().method("POST").endpoint( URI.create("https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v2/3456/os-security-group-rules")).headers( ImmutableMultimap.<String, String> builder().put("Accept", "application/json").put("X-Auth-Token", authToken).build()) .payload( payloadFromStringWithContentType( String.format("{\"security_group_rule\":{\"group_id\":\"%d\",\"parent_group_id\":\"%d\",\"ip_protocol\":\"tcp\",\"from_port\":\"%d\",\"to_port\":\"%d\"}}", groupId, groupId, port, port), "application/json")).build(); // note server responds with group name in the rule!! 
HttpResponse createSelfRuleResponse = HttpResponse.builder().statusCode(200) .payload( payloadFromStringWithContentType( String.format("{\"security_group_rule\": {\"from_port\": %d, \"group\": {\"tenant_id\": \"37936628937291\", \"name\": \"jclouds_mygroup\"}, \"ip_protocol\": \"tcp\", \"to_port\": %d, \"parent_group_id\": %d, \"ip_range\": {}, \"id\": %d}}", port, port, groupId, ruleId++), "application/json; charset=UTF-8")).build(); builder.put(createSelfRule, createSelfRuleResponse); } HttpRequest getSecurityGroup = HttpRequest.builder().method("GET").endpoint( URI.create("https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v2/3456/os-security-groups/" + groupId)).headers( ImmutableMultimap.<String, String> builder().put("Accept", "application/json").put("X-Auth-Token", authToken).build()).build(); HttpResponse getSecurityGroupResponse = HttpResponse.builder().statusCode(200).payload( payloadFromResource("/securitygroup_details_computeservice_typical.json")).build(); builder.put(getSecurityGroup, getSecurityGroupResponse); HttpRequest listSecurityGroups = HttpRequest.builder().method("GET").endpoint( URI.create("https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v2/3456/os-security-groups")).headers( ImmutableMultimap.<String, String> builder().put("Accept", "application/json").put("X-Auth-Token", authToken).build()).build(); HttpResponse listSecurityGroupsResponse = HttpResponse.builder().statusCode(200).payload( payloadFromResource("/securitygroup_list_details_computeservice_typical.json")).build(); builder.put(listSecurityGroups, listSecurityGroupsResponse); NovaApi apiCanCreateSecurityGroup = requestsSendResponses(builder.build()); CreateSecurityGroupIfNeeded fn = new CreateSecurityGroupIfNeeded(apiCanCreateSecurityGroup, locationIndex, securityGroupInRegionSecurityGroupFunction, factory); // we can find it org.jclouds.compute.domain.SecurityGroup expected = new SecurityGroupBuilder() .id("az-1.region-a.geo-1/2769") .providerId("2769") .name("jclouds_mygroup") .location(locationIndex.get().get("az-1.region-a.geo-1")) .ipPermissions(ImmutableList.of( IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(22) .toPort(22) .cidrBlock("0.0.0.0/0") .build(), IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(22) .toPort(22) .groupIds(ImmutableList.of("az-1.region-a.geo-1/2769")) .build(), IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(8080) .toPort(8080) .cidrBlock("0.0.0.0/0") .build(), IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(8080) .toPort(8080) .groupIds(ImmutableList.of("az-1.region-a.geo-1/2769")) .build() ) ) .build(); assertEquals( fn.apply(new RegionSecurityGroupNameAndPorts("az-1.region-a.geo-1", "jclouds_mygroup", ImmutableSet.of(22, 8080))).toString(), expected.toString().trim()); } public void testReturnExistingGroupOnAlreadyExists() throws Exception { Builder<HttpRequest, HttpResponse> builder = ImmutableMap.builder(); builder.put(keystoneAuthWithUsernameAndPasswordAndTenantName, responseWithKeystoneAccess); builder.put(extensionsOfNovaRequest, extensionsOfNovaResponse); HttpResponse createResponse = HttpResponse.builder().statusCode(400) .payload( payloadFromStringWithContentType( "{\"badRequest\": {\"message\": \"Security group test already exists\", \"code\": 400}}", "application/json; charset=UTF-8")).build(); builder.put(create, createResponse); HttpRequest list = HttpRequest.builder().method("GET").endpoint( URI.create("https://az-1.region-a.geo-1.compute.hpcloudsvc.com/v2/3456/os-security-groups")).headers( 
ImmutableMultimap.<String, String> builder().put("Accept", "application/json").put("X-Auth-Token", authToken).build()).build(); HttpResponse listResponse = HttpResponse.builder().statusCode(200).payload( payloadFromResource("/securitygroup_list_details_computeservice_typical.json")).build(); builder.put(list, listResponse); NovaApi apiWhenSecurityGroupsExist = requestsSendResponses(builder.build()); CreateSecurityGroupIfNeeded fn = new CreateSecurityGroupIfNeeded(apiWhenSecurityGroupsExist, locationIndex, securityGroupInRegionSecurityGroupFunction, factory); // we can find it org.jclouds.compute.domain.SecurityGroup expected = new SecurityGroupBuilder() .id("az-1.region-a.geo-1/2769") .providerId("2769") .name("jclouds_mygroup") .location(locationIndex.get().get("az-1.region-a.geo-1")) .ipPermissions(ImmutableList.of( IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(22) .toPort(22) .cidrBlock("0.0.0.0/0") .build(), IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(22) .toPort(22) .groupIds(ImmutableList.of("az-1.region-a.geo-1/2769")) .build(), IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(8080) .toPort(8080) .cidrBlock("0.0.0.0/0") .build(), IpPermission.builder() .ipProtocol(IpProtocol.TCP) .fromPort(8080) .toPort(8080) .groupIds(ImmutableList.of("az-1.region-a.geo-1/2769")) .build() ) ) .build(); assertEquals( fn.apply(new RegionSecurityGroupNameAndPorts("az-1.region-a.geo-1", "jclouds_mygroup", ImmutableSet.of(22, 8080))).toString(), expected.toString().trim() ); } }
6,736
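The test above drives CreateSecurityGroupIfNeeded end-to-end against canned HTTP exchanges. As a rough orientation, the sketch below shows how the same function is applied outside a test, assuming its collaborators (NovaApi, location index, group converter, Neutron factory) are already wired up exactly as in the fixture; the helper class and method names are illustrative, not jclouds API.

import com.google.common.collect.ImmutableSet;
import org.jclouds.openstack.nova.v2_0.compute.functions.CreateSecurityGroupIfNeeded;
import org.jclouds.openstack.nova.v2_0.domain.regionscoped.RegionSecurityGroupNameAndPorts;

class EnsureSecurityGroupSketch {
   // Applies the already-constructed function to a (region, name, ports) tuple. Per the
   // tests above, this either creates the group with one CIDR rule and one self-referencing
   // group rule per port, or falls back to looking the group up when it already exists.
   static org.jclouds.compute.domain.SecurityGroup ensure(CreateSecurityGroupIfNeeded createIfNeeded) {
      return createIfNeeded.apply(new RegionSecurityGroupNameAndPorts(
            "az-1.region-a.geo-1", "jclouds_mygroup", ImmutableSet.of(22, 8080)));
   }
}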
4,879
<reponame>kudlav/organicmaps
#import "MWMTableViewController.h"

@interface MWMRecentTrackSettingsController : MWMTableViewController

@end
44
2,073
<reponame>alexey-anufriev/pac4j package org.pac4j.oauth.client; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Test; import org.pac4j.core.util.TestsConstants; import org.pac4j.oauth.profile.casoauthwrapper.CasOAuthWrapperProfileDefinition; import java.io.IOException; import java.io.StringWriter; import java.util.HashMap; import java.util.Map; import static org.junit.Assert.*; /** * Tests the profile parsing from the {@link CasOAuthWrapperClient}. * * @author <NAME> * @since 1.9.2 */ public final class CasOAuthWrapperClientTests implements TestsConstants { @Test public void testParsingAttributesCASServerV4_2AndBefore() throws IOException { final var jsonFactory = new JsonFactory(new ObjectMapper()); final Map<String, Object> attributes = new HashMap<>(); attributes.put(KEY, VALUE); attributes.put(NAME, TOKEN); final var writer = new StringWriter(); try (var jsonGenerator = jsonFactory.createJsonGenerator(writer)) { jsonGenerator.writeStartObject(); jsonGenerator.writeStringField("id", ID); jsonGenerator.writeArrayFieldStart("attributes"); for (final var entry : attributes.entrySet()) { jsonGenerator.writeStartObject(); jsonGenerator.writeObjectField(entry.getKey(), entry.getValue()); jsonGenerator.writeEndObject(); } jsonGenerator.writeEndArray(); jsonGenerator.writeEndObject(); } final var body = writer.toString(); final var client = new CasOAuthWrapperClient(); client.setKey(KEY); client.setSecret(SECRET); client.setCasOAuthUrl(CALLBACK_URL); client.setCallbackUrl(CALLBACK_URL); client.init(); final var profile = new CasOAuthWrapperProfileDefinition().extractUserProfile(body); assertEquals(ID, profile.getId()); assertEquals(2, profile.getAttributes().size()); assertEquals(VALUE, profile.getAttribute(KEY)); assertEquals(TOKEN, profile.getAttribute(NAME)); } @Test public void testParsingAttributesCASServerV5() throws IOException { final Map<String, Object> attributes = new HashMap<>(); attributes.put(KEY, VALUE); attributes.put(NAME, TOKEN); final Map<String, Object> map = new HashMap<>(); map.put("id", ID); map.put("attributes", attributes); final var body = new ObjectMapper() .writer() .withDefaultPrettyPrinter() .writeValueAsString(map); final var client = new CasOAuthWrapperClient(); client.setKey(KEY); client.setSecret(SECRET); client.setCasOAuthUrl(CALLBACK_URL); client.setCallbackUrl(CALLBACK_URL); client.init(); final var profile = new CasOAuthWrapperProfileDefinition().extractUserProfile(body); assertEquals(ID, profile.getId()); assertEquals(2, profile.getAttributes().size()); assertEquals(VALUE, profile.getAttribute(KEY)); assertEquals(TOKEN, profile.getAttribute(NAME)); } }
1,301
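For orientation only: the client construction used in both tests above, written as it might appear in application setup code. The URLs are placeholders; everything else mirrors the calls the tests already make.

import org.pac4j.oauth.client.CasOAuthWrapperClient;

class CasClientSetupSketch {
   static CasOAuthWrapperClient newClient() {
      CasOAuthWrapperClient client = new CasOAuthWrapperClient();
      client.setKey("my-key");                                        // placeholder
      client.setSecret("my-secret");                                  // placeholder
      client.setCasOAuthUrl("https://cas.example.org/cas/oauth2.0");  // placeholder
      client.setCallbackUrl("https://app.example.org/callback");      // placeholder
      client.init();
      return client;
   }
}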
389
/* * Copyright 2014 Guidewire Software, Inc. */ package gw.internal.gosu.parser.java.classinfo; import com.sun.source.tree.AnnotationTree; import com.sun.source.tree.MethodTree; import com.sun.source.tree.ModifiersTree; import com.sun.source.tree.VariableTree; import com.sun.tools.javac.code.Flags; import com.sun.tools.javac.tree.JCTree; import gw.lang.reflect.IAnnotationInfo; import gw.lang.reflect.Modifier; import gw.lang.reflect.java.IJavaAnnotatedElement; import gw.lang.reflect.java.IJavaClassInfo; import java.lang.annotation.Annotation; import java.util.ArrayList; import java.util.List; public class JavaSourceModifierList implements IModifierList { private static final JavaSourceAnnotationInfo[] NONE = new JavaSourceAnnotationInfo[0]; private ModifiersTree _modifiersTree; private int _modifiers; private IJavaAnnotatedElement _owner; private JavaSourceAnnotationInfo[] _annotations; /** * For enum constants. */ public JavaSourceModifierList(JavaSourceEnumConstant owner, ModifiersTree modifiersTree, int modifiers) { _owner = owner; _modifiers = modifiers; _modifiersTree = modifiersTree; } public JavaSourceModifierList(IJavaAnnotatedElement owner, ModifiersTree modifiersTree) { _owner = owner; _modifiersTree = modifiersTree; _modifiers = (int)((JCTree.JCModifiers)_modifiersTree).flags; IJavaClassInfo declaringOwner = owner instanceof JavaSourceType ? (IJavaClassInfo) owner : owner.getEnclosingClass(); if( declaringOwner.isInterface() || declaringOwner.isAnnotation() ) { if( !modifiersTree.getFlags().contains( javax.lang.model.element.Modifier.DEFAULT ) && !modifiersTree.getFlags().contains( javax.lang.model.element.Modifier.STATIC ) ) { _modifiers |= Modifier.ABSTRACT; } if (owner instanceof JavaSourceField) { _modifiers |= Modifier.STATIC; } if( owner instanceof JavaSourceField || owner instanceof JavaSourceMethod ) { _modifiers |= Modifier.PUBLIC; } } // Types, nested in interfaces are public and static if( owner instanceof JavaSourceType && declaringOwner.getEnclosingClass() instanceof JavaSourceInterface ) { _modifiers |= Modifier.PUBLIC; _modifiers |= Modifier.STATIC; } if (declaringOwner.getEnclosingClass() == null && !hasModifier(Modifier.PUBLIC) && !hasModifier(Modifier.PROTECTED) && !hasModifier(Modifier.PRIVATE)) { _modifiers |= Modifier.INTERNAL; } if( owner instanceof JavaSourceMethod ) { // var args for( VariableTree p: ((MethodTree)((JavaSourceMethod)owner).getTree()).getParameters() ) { if( p instanceof JCTree.JCVariableDecl && (((JCTree.JCVariableDecl)p).mods.flags & Flags.VARARGS) != 0 ) { _modifiers |= 0x00000080; // from Modifier.VARARGS non-public id break; } } } } public boolean hasModifier(int modifierType) { return (_modifiers & modifierType) != 0; } @Override public int getModifiers() { return _modifiers; } private void maybeInitAnnotations() { if (_annotations == null) { List<? extends AnnotationTree> annotationsTrees = _modifiersTree.getAnnotations(); if (annotationsTrees.isEmpty()) { _annotations = NONE; } else { List<JavaSourceAnnotationInfo> annotations = new ArrayList<JavaSourceAnnotationInfo>(); for (AnnotationTree annotationTree : annotationsTrees) { annotations.add(new JavaSourceAnnotationInfo(annotationTree, _owner)); } _annotations = annotations.toArray(new JavaSourceAnnotationInfo[annotations.size()]); } } } @Override public boolean isAnnotationPresent(Class<? 
extends Annotation> annotationClass) { return getAnnotation(annotationClass) != null; } public IAnnotationInfo[] getAnnotations() { maybeInitAnnotations(); return _annotations; } @Override public IAnnotationInfo getAnnotation(Class annotationClass) { maybeInitAnnotations(); for (JavaSourceAnnotationInfo annotation : _annotations) { if (annotation.getName().equals(annotationClass.getName().replace('$', '.'))) { return annotation; } } return null; } public void setModifiers(int modifiers) { this._modifiers = modifiers; } public String toString() { return _owner.toString(); } }
1,565
1,863
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2018 NVIDIA Corporation. All rights reserved. #ifndef SCOPED_PHYS_XLOCK_H #define SCOPED_PHYS_XLOCK_H #include "foundation/PxPreprocessor.h" #include "ApexDefs.h" #include "PxScene.h" #include "Scene.h" namespace nvidia { namespace apex { /** \brief This helper class creates a scoped read access to the PhysX SDK API This helper class is used to create a scoped read lock/unlock pair around a section of code which is trying to do read access against the PhysX SDK. */ class ScopedPhysXLockRead { public: /** \brief Constructor for ScopedPhysXLockRead \param[in] scene the APEX scene \param[in] fileName used to determine what file called the lock for debugging purposes \param[in] lineno used to determine what line number called the lock for debugging purposes */ ScopedPhysXLockRead(nvidia::Scene* scene, const char *fileName, int lineno) : mApexScene(scene), mPhysXScene(0) { if (mApexScene) { mApexScene->lockRead(fileName, (uint32_t)lineno); } } /** \brief Constructor for ScopedPhysXLockRead \param[in] scene the PhysX scene \param[in] fileName used to determine what file called the lock for debugging purposes \param[in] lineno used to determine what line number called the lock for debugging purposes */ ScopedPhysXLockRead(physx::PxScene* scene, const char *fileName, int lineno) : mPhysXScene(scene), mApexScene(0) { if (mPhysXScene) { mPhysXScene->lockRead(fileName, (uint32_t)lineno); } } ~ScopedPhysXLockRead() { if (mApexScene) { mApexScene->unlockRead(); } if (mPhysXScene) { mPhysXScene->unlockRead(); } } private: nvidia::Scene* mApexScene; physx::PxScene* mPhysXScene; }; /** \brief This helper class creates a scoped write access to the PhysX SDK API This helper class is used to create a scoped write lock/unlock pair around a section of code which is trying to do read access against the PhysX SDK. 
*/ class ScopedPhysXLockWrite { public: /** \brief Constructor for ScopedPhysXLockWrite \param[in] scene the APEX scene \param[in] fileName used to determine what file called the lock for debugging purposes \param[in] lineno used to determine what line number called the lock for debugging purposes */ ScopedPhysXLockWrite(nvidia::Scene *scene, const char *fileName, int lineno) : mApexScene(scene), mPhysXScene(0) { if (mApexScene) { mApexScene->lockWrite(fileName, (uint32_t)lineno); } } /** \brief Constructor for ScopedPhysXLockWrite \param[in] scene the PhysX scene \param[in] fileName used to determine what file called the lock for debugging purposes \param[in] lineno used to determine what line number called the lock for debugging purposes */ ScopedPhysXLockWrite(physx::PxScene *scene, const char *fileName, int lineno) : mPhysXScene(scene), mApexScene(0) { if (mPhysXScene) { mPhysXScene->lockWrite(fileName, (uint32_t)lineno); } } ~ScopedPhysXLockWrite() { if (mApexScene) { mApexScene->unlockWrite(); } if (mPhysXScene) { mPhysXScene->unlockWrite(); } } private: nvidia::Scene* mApexScene; physx::PxScene* mPhysXScene; }; } } #if defined(_DEBUG) || PX_CHECKED /** \brief This macro creates a scoped write lock/unlock pair */ #define SCOPED_PHYSX_LOCK_WRITE(x) nvidia::apex::ScopedPhysXLockWrite _wlock(x,__FILE__,__LINE__); #else /** \brief This macro creates a scoped write lock/unlock pair */ #define SCOPED_PHYSX_LOCK_WRITE(x) nvidia::apex::ScopedPhysXLockWrite _wlock(x,"",0); #endif #if defined(_DEBUG) || PX_CHECKED /** \brief This macro creates a scoped read lock/unlock pair */ #define SCOPED_PHYSX_LOCK_READ(x) nvidia::apex::ScopedPhysXLockRead _rlock(x,__FILE__,__LINE__); #else /** \brief This macro creates a scoped read lock/unlock pair */ #define SCOPED_PHYSX_LOCK_READ(x) nvidia::apex::ScopedPhysXLockRead _rlock(x,"",0); #endif #endif
2,030
412
<gh_stars>100-1000 /* Copyright 2015 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "graphd/graphd.h" #include "graphd/graphd-write.h" #include <string.h> #include <errno.h> #include <stdio.h> #include <stdbool.h> /* Make sure that any unique clusters in a write request * don't already exist. * * - identify and duplicate selections from the unique constraints * - match the selection against the database * - if anything _doesn't_ return GRAPHD_ERR_NO, * fail with GRAPHD_ERR_UNIQUE_EXISTS. */ #define HAS_GUID(guidcon) \ ((guidcon).guidcon_include_valid && (guidcon).guidcon_include.gs_n == 1 && \ !GRAPH_GUID_IS_NULL((guidcon).guidcon_include.gs_guid[0])) /* Utility: Is the connection between parent and child part * of the unique constraint? */ static bool connection_is_part_of_unique_constraint(graphd_constraint *con) { if (con == NULL || con->con_parent == NULL) return false; return !!(graphd_linkage_is_my(con->con_linkage) ? (con->con_unique & (1 << GRAPHD_PATTERN_LINKAGE( graphd_linkage_my(con->con_linkage)))) : (con->con_parent->con_unique & (1 << GRAPHD_PATTERN_LINKAGE( graphd_linkage_i_am(con->con_linkage))))); } /* Utility: Return next constraint in traversal order: self, children, next. */ static graphd_constraint *next_constraint(graphd_constraint *con) { /* Children? */ if (con->con_head != NULL) return con->con_head; /* Next? if not, go up (but do not revisit). */ while (con->con_next == NULL) { if (con->con_parent == NULL) return NULL; con = con->con_parent; } return con->con_next; } /* Utility: Is this constraint the root of a cluster of constraints * that have unique annotations and are connected by unique * links? */ static bool is_unique_cluster_root(graphd_constraint *con) { if (con == NULL || !con->con_unique) return false; if (con->con_parent == NULL || con->con_parent->con_unique == 0) return true; /* Is the connection between con and con's parent * part of the key of the connection holder? * * If yes, then this child was already included * in the parent's cluster. */ return !connection_is_part_of_unique_constraint(con); } /* Utility: make result=() */ static int make_result_pattern(graphd_request *greq, graphd_constraint *con) { con->con_result = graphd_pattern_alloc(greq, NULL, GRAPHD_PATTERN_LIST); if (con->con_result == NULL) return errno ? errno : ENOMEM; con->con_uses_contents = graphd_constraint_uses_contents(con); return 0; } /** * @brief Utility: Make an empty linked constraint. * * @param greq request context this is happening for. * @param linkage what this constraint is to its parent. * * @result a new constraint with nothing in it, other * than the linkage. 
*/ static graphd_constraint *make_empty_linked_constraint(graphd_request *greq, int linkage) { graphd_constraint *out = NULL; int err; if ((out = cm_malloc(greq->greq_req.req_cm, sizeof(*out))) == NULL) return NULL; graphd_constraint_initialize(graphd_request_graphd(greq), out); out->con_linkage = graphd_linkage_make_i_am(linkage); /* Implicit aspects: must be live, must be the newest version; * pagesize is 1. */ out->con_live = GRAPHD_FLAG_TRUE; out->con_newest.gencon_valid = 1; out->con_newest.gencon_min = 0; out->con_newest.gencon_max = 0; out->con_resultpagesize_valid = true; out->con_resultpagesize = 1; out->con_countlimit_valid = true; out->con_countlimit = 1; out->con_archival = GRAPHD_FLAG_DONTCARE; out->con_count.countcon_min_valid = true; out->con_count.countcon_min = 1; /* Result=() */ if ((err = make_result_pattern(greq, out)) != 0 || (err = graphd_pattern_frame_create(greq, out)) != 0) { cm_free(greq->greq_req.req_cm, out); return NULL; } return out; } /** * @brief Utility: Make a duplicate of <in> into <out> (both used in * the same request). * * We're allocating on the request heap where needed, and are otherwise * reusing data from the original request. * * @param greq request context this is happening for. * @param in bind this constraint. * * @return a duplicate of in */ static graphd_constraint *duplicate_unique_cluster(graphd_request *greq, graphd_constraint *in) { graphd_constraint *out = NULL; cl_handle *cl = graphd_request_cl(greq); graphd_constraint *out_sub; graphd_constraint *in_sub; int i, err; int unq; cl_enter(cl, CL_LEVEL_SPEW, "(in:%s)", graphd_constraint_to_string(in)); if ((out = cm_malloc(greq->greq_req.req_cm, sizeof(*out))) == NULL) { cl_leave(cl, CL_LEVEL_SPEW, "out of memory"); return NULL; } graphd_constraint_initialize(graphd_request_graphd(greq), out); unq = in->con_unique; if (unq & (1 << GRAPHD_PATTERN_NAME)) out->con_name = in->con_name; if (unq & (1 << GRAPHD_PATTERN_VALUE)) { out->con_value = in->con_value; out->con_value_comparator = in->con_value_comparator; } if (unq & (1 << GRAPHD_PATTERN_TYPEGUID)) out->con_type = in->con_type; if (unq & ((1 << GRAPHD_PATTERN_DATATYPE) | (1 << GRAPHD_PATTERN_VALUETYPE))) out->con_valuetype = in->con_valuetype; for (i = 0; i < PDB_LINKAGE_N; i++) if (unq & (1 << GRAPHD_PATTERN_LINKAGE(i))) out->con_linkcon[i] = in->con_linkcon[i]; if (connection_is_part_of_unique_constraint(in)) out->con_linkage = in->con_linkage; if (unq & (1 << GRAPHD_PATTERN_TIMESTAMP)) { out->con_timestamp_valid = in->con_timestamp_valid; out->con_timestamp_min = in->con_timestamp_min; out->con_timestamp_max = in->con_timestamp_max; } /* Implicit aspects: must be live, must be the newest version; * pagesize is 1. */ out->con_live = GRAPHD_FLAG_TRUE; out->con_newest.gencon_valid = 1; out->con_newest.gencon_min = 0; out->con_newest.gencon_max = 0; out->con_count.countcon_min_valid = true; out->con_count.countcon_min = 1; out->con_resultpagesize_valid = true; out->con_resultpagesize = 1; out->con_countlimit_valid = true; out->con_countlimit = 1; out->con_archival = GRAPHD_FLAG_DONTCARE; /* If the write has a GUID constraint - that is, if it * versions another GUID or lineage - exclude that GUID * (or that lineage's head) from the match for the purposes * of unique. 
*/ if (HAS_GUID(in->con_guid) && !in->con_guid.guidcon_include_annotated) { out->con_guid.guidcon_exclude_valid = true; graphd_guid_set_initialize(&out->con_guid.guidcon_exclude); err = graphd_guid_set_add(greq, &out->con_guid.guidcon_exclude, in->con_guid.guidcon_include.gs_guid); if (err != 0) { /* Can't currently happen, actually. */ cl_leave(cl, CL_LEVEL_SPEW, "allocation error"); return NULL; } out->con_guid.guidcon_exclude_valid = true; } for (in_sub = in->con_head; in_sub != NULL; in_sub = in_sub->con_next) { if (!connection_is_part_of_unique_constraint(in_sub)) continue; if ((out_sub = duplicate_unique_cluster(greq, in_sub)) == NULL) { /* Children go unfree'd - no big deal, they're * on the request heap. */ cl_leave(cl, CL_LEVEL_SPEW, "recursive error"); return NULL; } graphd_constraint_append(out, out_sub); /* If the subconstraint already knows its GUID, include * the GUID itself in the constraint set as a linkage. */ if (graphd_linkage_is_i_am(in_sub->con_linkage) && HAS_GUID(in_sub->con_guid)) { int linkage = graphd_linkage_i_am(in_sub->con_linkage); err = graphd_guid_constraint_intersect_with_guid( greq, out, out->con_linkcon + linkage, in_sub->con_guid.guidcon_include.gs_guid); if (err != 0) { cl_leave(cl, CL_LEVEL_VERBOSE, "GUID intersect fails: %s", graphd_strerror(err)); return NULL; } cl_log(cl, CL_LEVEL_VERBOSE, "duplicate_unique_cluster: sub con %s " "knows its guid", graphd_constraint_to_string(in_sub)); } } cl_assert(cl, out->con_subcon_n <= in->con_subcon_n); /* If our parent connection is unique, but the parent * itself isn't, reflect the parent into a subconstraint. */ if (in->con_parent != NULL && in->con_parent->con_unique == GRAPHD_PATTERN_UNSPECIFIED && connection_is_part_of_unique_constraint(in)) { graphd_constraint *par = in->con_parent; /* It must be an "is-my" linkage, where I'm pointing * to the parent -- otherwise, the parent would have * to have a unique tag for it to be a unique linkage. */ cl_assert(cl, graphd_linkage_is_my(in->con_linkage)); /* If the parent constraint already knows its GUID, include * the GUID itself in the constraint set as a linkage. */ if (HAS_GUID(par->con_guid)) { int linkage = graphd_linkage_my(in->con_linkage); err = graphd_guid_constraint_intersect_with_guid( greq, out, out->con_linkcon + linkage, par->con_guid.guidcon_include.gs_guid); if (err != 0) { cl_leave(cl, CL_LEVEL_VERBOSE, "GUID intersect fails: %s", graphd_strerror(err)); return NULL; } cl_log(cl, CL_LEVEL_VERBOSE, "duplicate_unique_cluster: parent con %s " "knows its guid", graphd_constraint_to_string(par)); } else { out_sub = make_empty_linked_constraint( greq, graphd_linkage_my(in->con_linkage)); if (out_sub == NULL) { cl_leave(cl, CL_LEVEL_SPEW, "failed to " "allocate linked constraint"); return NULL; } graphd_constraint_append(out, out_sub); } } /* Result=() */ if ((err = make_result_pattern(greq, out)) != 0 || (err = graphd_pattern_frame_create(greq, out)) != 0) { cl_leave(cl, CL_LEVEL_SPEW, "result pattern error: %s", graphd_strerror(err)); return NULL; } cl_leave(cl, CL_LEVEL_SPEW, "%s", graphd_constraint_to_string(out)); return out; } typedef struct { graphd_stack_context cuc_sc; graphd_constraint *cuc_con; graphd_request *cuc_greq; int cuc_err; int *cuc_err_out; graphd_value cuc_value; } check_unique_context; /** * @brief Annotate context resource method: free. 
* @param resource_manager_data opaque application handle for all * resources in this manager, ignored * @param resoure_data the graphd_stack, cast to void * */ static void check_unique_context_resource_free(void *resource_manager_data, void *resource_data) { check_unique_context *cuc = resource_data; graphd_value_finish(graphd_request_cl(cuc->cuc_greq), &cuc->cuc_value); cm_free(cuc->cuc_greq->greq_req.req_cm, cuc); } /** * @brief Annotate context resource method: list. * * @param log_data a cl_handle, cast to void * * @param resource_manager_data opaque application handle for all * resources in this manager, ignored * @param resoure_data the graphd_stack, cast to void * */ static void check_unique_context_resource_list(void *log_data, void *resource_manager_data, void *resource_data) { cl_handle *cl = log_data; check_unique_context *cuc = resource_data; cl_log(cl, CL_LEVEL_VERBOSE, "write unique=() checking context @ %p", (void *)cuc); } /** * @brief Check unique context resource type */ static cm_resource_type check_unique_context_resource_type = { "write unique=() check context", check_unique_context_resource_free, check_unique_context_resource_list}; static int check_unique_run_read_results(graphd_stack *stack, graphd_stack_context *stack_context); /** * @brief Check unique context stack-context method: run (1) * * This is called directly after the context has * been pushed on stack. * * @param stack Stack we're running on * @param stack_context Specific context */ static int check_unique_run(graphd_stack *stack, graphd_stack_context *stack_context) { check_unique_context *cuc = (void *)stack_context; cl_handle *cl = graphd_request_cl(cuc->cuc_greq); cl_enter(cl, CL_LEVEL_SPEW, "enter"); /* Fast forward through the tree until we're * standing on the root of a unique cluster. */ while (cuc->cuc_con != NULL && !is_unique_cluster_root(cuc->cuc_con)) cuc->cuc_con = next_constraint(cuc->cuc_con); if (cuc->cuc_con == NULL) { /* Done. */ graphd_stack_pop(stack); cl_leave(cl, CL_LEVEL_SPEW, "done"); return 0; } /* Duplicate the unique cluster. */ cuc->cuc_con->con_unique_dup = duplicate_unique_cluster(cuc->cuc_greq, cuc->cuc_con); if (cuc->cuc_con->con_unique_dup == NULL) { if (cuc->cuc_err_out != NULL) *cuc->cuc_err_out = errno ? errno : ENOMEM; graphd_stack_pop(stack); cl_leave(cl, CL_LEVEL_SPEW, "error (stored)"); return 0; } /* Run the unique cluster as a query. The response * will be delivered to the next function, below. */ graphd_stack_resume(stack, stack_context, check_unique_run_read_results); graphd_read_push(cuc->cuc_greq, cuc->cuc_con->con_unique_dup, &cuc->cuc_value, &cuc->cuc_err); cl_leave(cl, CL_LEVEL_SPEW, "-> read"); return 0; } static int check_unique_freeze(graphd_stack *stack, graphd_stack_context *stack_context) { return PDB_ERR_MORE; } static int check_unique_thaw(graphd_stack *stack, graphd_stack_context *stack_context) { return GRAPHD_ERR_NO; } static graphd_stack_type check_unique_type = { check_unique_run, check_unique_freeze, check_unique_thaw}; /** * @brief Annotate context stack-context method: run (2) * * This deals with the results from the read that's tried * to find matches for a unique cluster. 
* * @param stack Stack we're running on * @param stack_context Specific context */ static int check_unique_run_read_results(graphd_stack *stack, graphd_stack_context *stack_context) { check_unique_context *cuc = (void *)stack_context; cl_handle *cl = graphd_request_cl(cuc->cuc_greq); cl_enter(cl, CL_LEVEL_SPEW, "enter"); if (cuc->cuc_err == 0) cuc->cuc_err = GRAPHD_ERR_UNIQUE_EXISTS; else if (cuc->cuc_err == GRAPHD_ERR_NO) cuc->cuc_err = 0; graphd_value_finish(cl, &cuc->cuc_value); if (cuc->cuc_err != 0) { *cuc->cuc_err_out = cuc->cuc_err; cl_leave(cl, CL_LEVEL_SPEW, "%s", graphd_strerror(cuc->cuc_err)); graphd_stack_pop(stack); return 0; } cuc->cuc_con = next_constraint(cuc->cuc_con); graphd_stack_resume(stack, stack_context, check_unique_run); cl_leave(cl, CL_LEVEL_SPEW, "leave"); return 0; } /** * @brief Push a context on the stack that will check * unique clusters in a constraint tree. * * This module returns GRAPHD_ERR_UNIQUE_EXISTS if any of the * constraint clusters marked as "unique" already exist in the * database. * * (Unique constraint clusters are subtrees of constraints * that have "unique" clauses in them and are connected by * linkage listed in its owner's "unique" clause.) * * @param greq Request whose stack we're pushing on * @param con Root of the request's constraint tree * @param err_out return errors here. */ void graphd_write_check_unique_push(graphd_request *greq, graphd_constraint *con, int *err_out) { check_unique_context *cuc; cl_handle *cl = graphd_request_cl(greq); cl_enter(cl, CL_LEVEL_SPEW, "enter"); cl_assert(cl, err_out != NULL); *err_out = 0; cuc = cm_zalloc(greq->greq_req.req_cm, sizeof(*cuc)); if (cuc == NULL) { int err = errno; cl_leave(cl, CL_LEVEL_ERROR, "failed to allocate context: %s", strerror(err)); *err_out = err ? err : ENOMEM; return; } graphd_value_initialize(&cuc->cuc_value); cuc->cuc_greq = greq; cuc->cuc_con = con; cuc->cuc_err_out = err_out; *err_out = 0; graphd_stack_push(&greq->greq_stack, &cuc->cuc_sc, &check_unique_context_resource_type, &check_unique_type); cl_leave(cl, CL_LEVEL_SPEW, "leave"); }
7,192
848
/*Copyright 2019 Xilinx Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.*/ /* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CONTRIB_DECENT_Q_UTILS_FOLD_BATCHNORMS_H_ #define TENSORFLOW_CONTRIB_DECENT_Q_UTILS_FOLD_BATCHNORMS_H_ #include "tensorflow/contrib/decent_q/utils/transform_utils.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { namespace decent_q { // Get the merged weights of folded conv and batchnorm Status GetMergedConvWeights(const NodeDef& conv_node, const NodeDef& weights_node, const NodeDef& mul_values_node, NodeDef* scaled_weights_node); // Get the merged biases of folded conv, biasadd and batchnorm Status GetMergedConvBiases(const NodeDef& bias_node, const NodeDef& mul_values_node, const NodeDef& add_values_node, NodeDef* scaled_bias_node); // Finds monolithic batch norm ops (as used in early versions of TensorFlow) and // converts them into premultiplied weight inputs to convolutions. Status FoldOldBatchNorms(const GraphDef& input_graph_def, GraphDef* output_graph_def); // Finds monolithic batch norm ops (as used in early versions of TensorFlow) and // converts them into premultiplied weight inputs to convolutions. Status UpdateOldBatchNorms(const GraphDef& input_graph_def, GraphDef* output_graph_def); // Converts Conv2D or MatMul ops followed by column-wise Muls into equivalent // ops with the Mul baked into the convolution weights, to save computation // during inference. Status FoldBatchNormsInference(const GraphDef& input_graph_def, GraphDef* output_graph_def); // Converts Conv2D and a mul ops into a Conv2D Status FoldConvMulInference(const GraphDef& input_graph_def, GraphDef* output_graph_def); // Fold batchnorms for quantize training Status FoldBatchNormsTraining(const GraphDef& input_graph_def, GraphDef* output_graph_def); Status FoldBatchNorms(const GraphDef& input_graph_def, GraphDef* output_graph_def, bool is_training = false); // Command Wrapper for Decent_Q Graph Transform Status FoldBatchNormsCommand(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); } // namespace decent_q } // namespace tensorflow #endif // TENSORFLOW_CONTRIB_DECENT_Q_UTILS_INSERT_FIX_NEURON_OPS_H_
1,370
2,414
/* * <!-- * ~ Copyright 2019 WeBank * ~ * ~ Licensed under the Apache License, Version 2.0 (the "License"); * ~ you may not use this file except in compliance with the License. * ~ You may obtain a copy of the License at * ~ * ~ http://www.apache.org/licenses/LICENSE-2.0 * ~ * ~ Unless required by applicable law or agreed to in writing, software * ~ distributed under the License is distributed on an "AS IS" BASIS, * ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * ~ See the License for the specific language governing permissions and * ~ limitations under the License. * --> * */ package com.webank.wedatasphere.linkis.governance.common.entity.job; import java.util.List; /** * @date 2021/5/17 * @description */ public class JobRequestWithDetail extends JobRequest { public JobRequestWithDetail() {} public JobRequestWithDetail(JobRequest jobRequest) { setId(jobRequest.getId()); setReqId(jobRequest.getReqId()); setSubmitUser(jobRequest.getSubmitUser()); setExecuteUser(jobRequest.getExecuteUser()); setSource(jobRequest.getSource()); setExecutionCode(jobRequest.getExecutionCode()); setLabels(jobRequest.getLabels()); setParams(jobRequest.getParams()); setProgress(jobRequest.getProgress()); setStatus(jobRequest.getStatus()); setLogPath(jobRequest.getLogPath()); setErrorCode(jobRequest.getErrorCode()); setErrorDesc(jobRequest.getErrorDesc()); setCreatedTime(jobRequest.getCreatedTime()); setUpdatedTime(jobRequest.getUpdatedTime()); setInstances(jobRequest.getInstances()); setMetrics(jobRequest.getMetrics()); } private List<SubJobDetail> subJobDetailList; public List<SubJobDetail> getSubJobDetailList() { return subJobDetailList; } public JobRequestWithDetail setSubJobDetailList(List<SubJobDetail> subJobDetailList) { this.subJobDetailList = subJobDetailList; return this; } }
746
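A short usage sketch, assuming nothing beyond the class above: the copy constructor carries over every JobRequest field and the fluent setter attaches the sub-job details, so wrapping can be done in one expression. The helper class, method, and variable names are illustrative.

import java.util.List;
import com.webank.wedatasphere.linkis.governance.common.entity.job.JobRequest;
import com.webank.wedatasphere.linkis.governance.common.entity.job.JobRequestWithDetail;
import com.webank.wedatasphere.linkis.governance.common.entity.job.SubJobDetail;

class JobRequestWithDetailSketch {
   // Copy an existing request and attach its sub-job details fluently.
   static JobRequestWithDetail attachDetails(JobRequest request, List<SubJobDetail> details) {
      return new JobRequestWithDetail(request).setSubJobDetailList(details);
   }
}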
2,151
/* * Mesa 3-D graphics library * Version: 7.1 * * Copyright (C) 1999-2007 <NAME> All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * <NAME> BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "main/glheader.h" #include "main/context.h" #include "main/imports.h" #include "main/format_pack.h" #include "main/format_unpack.h" #include "s_context.h" #include "s_depth.h" #include "s_stencil.h" #include "s_span.h" /* Stencil Logic: IF stencil test fails THEN Apply fail-op to stencil value Don't write the pixel (RGBA,Z) ELSE IF doing depth test && depth test fails THEN Apply zfail-op to stencil value Write RGBA and Z to appropriate buffers ELSE Apply zpass-op to stencil value ENDIF */ /** * Compute/return the offset of the stencil value in a pixel. * For example, if the format is Z24+S8, the position of the stencil bits * within the 4-byte pixel will be either 0 or 3. */ static GLint get_stencil_offset(gl_format format) { const GLubyte one = 1; GLubyte pixel[MAX_PIXEL_BYTES]; GLint bpp = _mesa_get_format_bytes(format); GLint i; assert(_mesa_get_format_bits(format, GL_STENCIL_BITS) == 8); memset(pixel, 0, sizeof(pixel)); _mesa_pack_ubyte_stencil_row(format, 1, &one, pixel); for (i = 0; i < bpp; i++) { if (pixel[i]) return i; } _mesa_problem(NULL, "get_stencil_offset() failed\n"); return 0; } /** Clamp the stencil value to [0, 255] */ static inline GLubyte clamp(GLint val) { if (val < 0) return 0; else if (val > 255) return 255; else return val; } #define STENCIL_OP(NEW_VAL) \ if (invmask == 0) { \ for (i = j = 0; i < n; i++, j += stride) { \ if (mask[i]) { \ GLubyte s = stencil[j]; \ (void) s; \ stencil[j] = (GLubyte) (NEW_VAL); \ } \ } \ } \ else { \ for (i = j = 0; i < n; i++, j += stride) { \ if (mask[i]) { \ GLubyte s = stencil[j]; \ stencil[j] = (GLubyte) ((invmask & s) | (wrtmask & (NEW_VAL))); \ } \ } \ } /** * Apply the given stencil operator to the array of stencil values. * Don't touch stencil[i] if mask[i] is zero. 
* @param n number of stencil values * @param oper the stencil buffer operator * @param face 0 or 1 for front or back face operation * @param stencil array of stencil values (in/out) * @param mask array [n] of flag: 1=apply operator, 0=don't apply operator * @param stride stride between stencil values */ static void apply_stencil_op(const struct gl_context *ctx, GLenum oper, GLuint face, GLuint n, GLubyte stencil[], const GLubyte mask[], GLint stride) { const GLubyte ref = ctx->Stencil.Ref[face]; const GLubyte wrtmask = ctx->Stencil.WriteMask[face]; const GLubyte invmask = (GLubyte) (~wrtmask); GLuint i, j; switch (oper) { case GL_KEEP: /* do nothing */ break; case GL_ZERO: /* replace stencil buf values with zero */ STENCIL_OP(0); break; case GL_REPLACE: /* replace stencil buf values with ref value */ STENCIL_OP(ref); break; case GL_INCR: /* increment stencil buf values, with clamping */ STENCIL_OP(clamp(s + 1)); break; case GL_DECR: /* increment stencil buf values, with clamping */ STENCIL_OP(clamp(s - 1)); break; case GL_INCR_WRAP_EXT: /* increment stencil buf values, without clamping */ STENCIL_OP(s + 1); break; case GL_DECR_WRAP_EXT: /* increment stencil buf values, without clamping */ STENCIL_OP(s - 1); break; case GL_INVERT: /* replace stencil buf values with inverted value */ STENCIL_OP(~s); break; default: _mesa_problem(ctx, "Bad stencil op in apply_stencil_op"); } } #define STENCIL_TEST(FUNC) \ for (i = j = 0; i < n; i++, j += stride) { \ if (mask[i]) { \ s = (GLubyte) (stencil[j] & valueMask); \ if (FUNC) { \ /* stencil pass */ \ fail[i] = 0; \ } \ else { \ /* stencil fail */ \ fail[i] = 1; \ mask[i] = 0; \ } \ } \ else { \ fail[i] = 0; \ } \ } /** * Apply stencil test to an array of stencil values (before depth buffering). * For the values that fail, we'll apply the GL_STENCIL_FAIL operator to * the stencil values. * * @param face 0 or 1 for front or back-face polygons * @param n number of pixels in the array * @param stencil array of [n] stencil values (in/out) * @param mask array [n] of flag: 0=skip the pixel, 1=stencil the pixel, * values are set to zero where the stencil test fails. * @param stride stride between stencil values * @return GL_FALSE = all pixels failed, GL_TRUE = zero or more pixels passed. */ static GLboolean do_stencil_test(struct gl_context *ctx, GLuint face, GLuint n, GLubyte stencil[], GLubyte mask[], GLint stride) { SWcontext *swrast = SWRAST_CONTEXT(ctx); GLubyte *fail = swrast->stencil_temp.buf2; GLboolean allfail = GL_FALSE; GLuint i, j; const GLuint valueMask = ctx->Stencil.ValueMask[face]; const GLubyte ref = (GLubyte) (ctx->Stencil.Ref[face] & valueMask); GLubyte s; /* * Perform stencil test. 
The results of this operation are stored * in the fail[] array: * IF fail[i] is non-zero THEN * the stencil fail operator is to be applied * ELSE * the stencil fail operator is not to be applied * ENDIF */ switch (ctx->Stencil.Function[face]) { case GL_NEVER: STENCIL_TEST(0); allfail = GL_TRUE; break; case GL_LESS: STENCIL_TEST(ref < s); break; case GL_LEQUAL: STENCIL_TEST(ref <= s); break; case GL_GREATER: STENCIL_TEST(ref > s); break; case GL_GEQUAL: STENCIL_TEST(ref >= s); break; case GL_EQUAL: STENCIL_TEST(ref == s); break; case GL_NOTEQUAL: STENCIL_TEST(ref != s); break; case GL_ALWAYS: STENCIL_TEST(1); break; default: _mesa_problem(ctx, "Bad stencil func in gl_stencil_span"); return 0; } if (ctx->Stencil.FailFunc[face] != GL_KEEP) { apply_stencil_op(ctx, ctx->Stencil.FailFunc[face], face, n, stencil, fail, stride); } return !allfail; } /** * Compute the zpass/zfail masks by comparing the pre- and post-depth test * masks. */ static inline void compute_pass_fail_masks(GLuint n, const GLubyte origMask[], const GLubyte newMask[], GLubyte passMask[], GLubyte failMask[]) { GLuint i; for (i = 0; i < n; i++) { ASSERT(newMask[i] == 0 || newMask[i] == 1); passMask[i] = origMask[i] & newMask[i]; failMask[i] = origMask[i] & (newMask[i] ^ 1); } } /** * Get 8-bit stencil values from random locations in the stencil buffer. */ static void get_s8_values(struct gl_context *ctx, struct gl_renderbuffer *rb, GLuint count, const GLint x[], const GLint y[], GLubyte stencil[]) { struct swrast_renderbuffer *srb = swrast_renderbuffer(rb); const GLint w = rb->Width, h = rb->Height; const GLubyte *map = _swrast_pixel_address(rb, 0, 0); GLuint i; if (rb->Format == MESA_FORMAT_S8) { const GLint rowStride = srb->RowStride; for (i = 0; i < count; i++) { if (x[i] >= 0 && y[i] >= 0 && x[i] < w && y[i] < h) { stencil[i] = *(map + y[i] * rowStride + x[i]); } } } else { const GLint bpp = _mesa_get_format_bytes(rb->Format); const GLint rowStride = srb->RowStride; for (i = 0; i < count; i++) { if (x[i] >= 0 && y[i] >= 0 && x[i] < w && y[i] < h) { const GLubyte *src = map + y[i] * rowStride + x[i] * bpp; _mesa_unpack_ubyte_stencil_row(rb->Format, 1, src, &stencil[i]); } } } } /** * Put 8-bit stencil values at random locations into the stencil buffer. */ static void put_s8_values(struct gl_context *ctx, struct gl_renderbuffer *rb, GLuint count, const GLint x[], const GLint y[], const GLubyte stencil[]) { const GLint w = rb->Width, h = rb->Height; gl_pack_ubyte_stencil_func pack_stencil = _mesa_get_pack_ubyte_stencil_func(rb->Format); GLuint i; for (i = 0; i < count; i++) { if (x[i] >= 0 && y[i] >= 0 && x[i] < w && y[i] < h) { GLubyte *dst = _swrast_pixel_address(rb, x[i], y[i]); pack_stencil(&stencil[i], dst); } } } /** * /return GL_TRUE = one or more fragments passed, * GL_FALSE = all fragments failed. */ GLboolean _swrast_stencil_and_ztest_span(struct gl_context *ctx, SWspan *span) { SWcontext *swrast = SWRAST_CONTEXT(ctx); struct gl_framebuffer *fb = ctx->DrawBuffer; struct gl_renderbuffer *rb = fb->Attachment[BUFFER_STENCIL].Renderbuffer; const GLint stencilOffset = get_stencil_offset(rb->Format); const GLint stencilStride = _mesa_get_format_bytes(rb->Format); const GLuint face = (span->facing == 0) ? 
0 : ctx->Stencil._BackFace; const GLuint count = span->end; GLubyte *mask = span->array->mask; GLubyte *stencilTemp = swrast->stencil_temp.buf1; GLubyte *stencilBuf; if (span->arrayMask & SPAN_XY) { /* read stencil values from random locations */ get_s8_values(ctx, rb, count, span->array->x, span->array->y, stencilTemp); stencilBuf = stencilTemp; } else { /* Processing a horizontal run of pixels. Since stencil is always * 8 bits for all MESA_FORMATs, we just need to use the right offset * and stride to access them. */ stencilBuf = _swrast_pixel_address(rb, span->x, span->y) + stencilOffset; } /* * Apply the stencil test to the fragments. * failMask[i] is 1 if the stencil test failed. */ if (!do_stencil_test(ctx, face, count, stencilBuf, mask, stencilStride)) { /* all fragments failed the stencil test, we're done. */ span->writeAll = GL_FALSE; if (span->arrayMask & SPAN_XY) { /* need to write the updated stencil values back to the buffer */ put_s8_values(ctx, rb, count, span->array->x, span->array->y, stencilTemp); } return GL_FALSE; } /* * Some fragments passed the stencil test, apply depth test to them * and apply Zpass and Zfail stencil ops. */ if (ctx->Depth.Test == GL_FALSE || ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer == NULL) { /* * No depth buffer, just apply zpass stencil function to active pixels. */ apply_stencil_op(ctx, ctx->Stencil.ZPassFunc[face], face, count, stencilBuf, mask, stencilStride); } else { /* * Perform depth buffering, then apply zpass or zfail stencil function. */ SWcontext *swrast = SWRAST_CONTEXT(ctx); GLubyte *passMask = swrast->stencil_temp.buf2; GLubyte *failMask = swrast->stencil_temp.buf3; GLubyte *origMask = swrast->stencil_temp.buf4; /* save the current mask bits */ memcpy(origMask, mask, count * sizeof(GLubyte)); /* apply the depth test */ _swrast_depth_test_span(ctx, span); compute_pass_fail_masks(count, origMask, mask, passMask, failMask); /* apply the pass and fail operations */ if (ctx->Stencil.ZFailFunc[face] != GL_KEEP) { apply_stencil_op(ctx, ctx->Stencil.ZFailFunc[face], face, count, stencilBuf, failMask, stencilStride); } if (ctx->Stencil.ZPassFunc[face] != GL_KEEP) { apply_stencil_op(ctx, ctx->Stencil.ZPassFunc[face], face, count, stencilBuf, passMask, stencilStride); } } /* Write updated stencil values back into hardware stencil buffer */ if (span->arrayMask & SPAN_XY) { put_s8_values(ctx, rb, count, span->array->x, span->array->y, stencilBuf); } span->writeAll = GL_FALSE; return GL_TRUE; /* one or more fragments passed both tests */ } /** * Return a span of stencil values from the stencil buffer. * Used for glRead/CopyPixels * Input: n - how many pixels * x,y - location of first pixel * Output: stencil - the array of stencil values */ void _swrast_read_stencil_span(struct gl_context *ctx, struct gl_renderbuffer *rb, GLint n, GLint x, GLint y, GLubyte stencil[]) { GLubyte *src; if (y < 0 || y >= (GLint) rb->Height || x + n <= 0 || x >= (GLint) rb->Width) { /* span is completely outside framebuffer */ return; /* undefined values OK */ } if (x < 0) { GLint dx = -x; x = 0; n -= dx; stencil += dx; } if (x + n > (GLint) rb->Width) { GLint dx = x + n - rb->Width; n -= dx; } if (n <= 0) { return; } src = _swrast_pixel_address(rb, x, y); _mesa_unpack_ubyte_stencil_row(rb->Format, n, src, stencil); } /** * Write a span of stencil values to the stencil buffer. This function * applies the stencil write mask when needed. 
* Used for glDraw/CopyPixels * Input: n - how many pixels * x, y - location of first pixel * stencil - the array of stencil values */ void _swrast_write_stencil_span(struct gl_context *ctx, GLint n, GLint x, GLint y, const GLubyte stencil[] ) { SWcontext *swrast = SWRAST_CONTEXT(ctx); struct gl_framebuffer *fb = ctx->DrawBuffer; struct gl_renderbuffer *rb = fb->Attachment[BUFFER_STENCIL].Renderbuffer; const GLuint stencilMax = (1 << fb->Visual.stencilBits) - 1; const GLuint stencilMask = ctx->Stencil.WriteMask[0]; GLubyte *stencilBuf; if (y < 0 || y >= (GLint) rb->Height || x + n <= 0 || x >= (GLint) rb->Width) { /* span is completely outside framebuffer */ return; /* undefined values OK */ } if (x < 0) { GLint dx = -x; x = 0; n -= dx; stencil += dx; } if (x + n > (GLint) rb->Width) { GLint dx = x + n - rb->Width; n -= dx; } if (n <= 0) { return; } stencilBuf = _swrast_pixel_address(rb, x, y); if ((stencilMask & stencilMax) != stencilMax) { /* need to apply writemask */ GLubyte *destVals = swrast->stencil_temp.buf1; GLubyte *newVals = swrast->stencil_temp.buf2; GLint i; _mesa_unpack_ubyte_stencil_row(rb->Format, n, stencilBuf, destVals); for (i = 0; i < n; i++) { newVals[i] = (stencil[i] & stencilMask) | (destVals[i] & ~stencilMask); } _mesa_pack_ubyte_stencil_row(rb->Format, n, newVals, stencilBuf); } else { _mesa_pack_ubyte_stencil_row(rb->Format, n, stencil, stencilBuf); } } /** * Clear the stencil buffer. If the buffer is a combined * depth+stencil buffer, only the stencil bits will be touched. */ void _swrast_clear_stencil_buffer(struct gl_context *ctx) { struct gl_renderbuffer *rb = ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer; const GLubyte stencilBits = ctx->DrawBuffer->Visual.stencilBits; const GLuint writeMask = ctx->Stencil.WriteMask[0]; const GLuint stencilMax = (1 << stencilBits) - 1; GLint x, y, width, height; GLubyte *map; GLint rowStride, i, j; GLbitfield mapMode; if (!rb || writeMask == 0) return; /* compute region to clear */ x = ctx->DrawBuffer->_Xmin; y = ctx->DrawBuffer->_Ymin; width = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin; height = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin; mapMode = GL_MAP_WRITE_BIT; if ((writeMask & stencilMax) != stencilMax) { /* need to mask stencil values */ mapMode |= GL_MAP_READ_BIT; } else if (_mesa_get_format_bits(rb->Format, GL_DEPTH_BITS) > 0) { /* combined depth+stencil, need to mask Z values */ mapMode |= GL_MAP_READ_BIT; } ctx->Driver.MapRenderbuffer(ctx, rb, x, y, width, height, mapMode, &map, &rowStride); if (!map) { _mesa_error(ctx, GL_OUT_OF_MEMORY, "glClear(stencil)"); return; } switch (rb->Format) { case MESA_FORMAT_S8: { GLubyte clear = ctx->Stencil.Clear & writeMask & 0xff; GLubyte mask = (~writeMask) & 0xff; if (mask != 0) { /* masked clear */ for (i = 0; i < height; i++) { GLubyte *row = map; for (j = 0; j < width; j++) { row[j] = (row[j] & mask) | clear; } map += rowStride; } } else if (rowStride == width) { /* clear whole buffer */ memset(map, clear, width * height); } else { /* clear scissored */ for (i = 0; i < height; i++) { memset(map, clear, width); map += rowStride; } } } break; case MESA_FORMAT_S8_Z24: { GLuint clear = (ctx->Stencil.Clear & writeMask & 0xff) << 24; GLuint mask = (((~writeMask) & 0xff) << 24) | 0xffffff; for (i = 0; i < height; i++) { GLuint *row = (GLuint *) map; for (j = 0; j < width; j++) { row[j] = (row[j] & mask) | clear; } map += rowStride; } } break; case MESA_FORMAT_Z24_S8: { GLuint clear = ctx->Stencil.Clear & writeMask & 0xff; GLuint mask = 0xffffff00 | 
((~writeMask) & 0xff); for (i = 0; i < height; i++) { GLuint *row = (GLuint *) map; for (j = 0; j < width; j++) { row[j] = (row[j] & mask) | clear; } map += rowStride; } } break; default: _mesa_problem(ctx, "Unexpected stencil buffer format %s" " in _swrast_clear_stencil_buffer()", _mesa_get_format_name(rb->Format)); } ctx->Driver.UnmapRenderbuffer(ctx, rb); }
9,929
10,016
/* * HeadURL: https://svn.apache.org/repos/asf/httpcomponents/oac.hc3x/trunk/src/java/org/apache/commons/httpclient/protocol/SecureProtocolSocketFactory.jav * Revision: 608014 * Date: 2008-01-02 05:48:53 +0000 (Wed, 02 Jan 2008) * * ==================================================================== * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ==================================================================== * * This software consists of voluntary contributions made by many * individuals on behalf of the Apache Software Foundation. For more * information on the Apache Software Foundation, please see * <http://www.apache.org/>. * */ package org.apache.commons.httpclient.protocol; import java.io.IOException; import java.net.Socket; import java.net.UnknownHostException; import org.apache.commons.httpclient.params.HttpConnectionParams; /* * Forked class... * * It was forked because ZAP depends (and uses) Commons HttpClient which is not compatible with, the newer version, * HttpComponents Client. * * Changes: * - Removed the characters "$" from the previous SVN keywords (HeadURL, Revision and Date). * - Add a (default) createSocket(...) that allows to provide the HttpConnectionParams. */ /** * A ProtocolSocketFactory that is secure. * * @see org.apache.commons.httpclient.protocol.ProtocolSocketFactory * * @author <NAME> * @author <a href="mailto:<EMAIL>"><NAME></a> * @since 2.0 */ public interface SecureProtocolSocketFactory extends ProtocolSocketFactory { /** * Returns a socket connected to the given host that is layered over an * existing socket. Used primarily for creating secure sockets through * proxies. * * @param socket the existing socket * @param host the host name/IP * @param port the port on the host * @param autoClose a flag for closing the underling socket when the created * socket is closed * * @return Socket a new socket * * @throws IOException if an I/O error occurs while creating the socket * @throws UnknownHostException if the IP address of the host cannot be * determined */ Socket createSocket( Socket socket, String host, int port, boolean autoClose ) throws IOException, UnknownHostException; default Socket createSocket( Socket socket, String host, int port, boolean autoClose, HttpConnectionParams params ) throws IOException { return createSocket(socket, host, port, autoClose); } }
985
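A hedged sketch of what an implementation of the layered createSocket contract above can look like, delegating to the JDK's default SSLSocketFactory. This is not ZAP's real factory (which manages its own SSLContext); the class is left abstract so the plain host/port methods inherited from ProtocolSocketFactory can be ignored here.

import java.io.IOException;
import java.net.Socket;
import javax.net.ssl.SSLSocketFactory;

abstract class JdkLayeredSocketFactorySketch implements SecureProtocolSocketFactory {

    // Layers TLS over an already-connected socket, e.g. one tunnelled through an HTTP
    // proxy -- exactly the use case the interface javadoc above describes.
    @Override
    public Socket createSocket(Socket socket, String host, int port, boolean autoClose)
            throws IOException {
        SSLSocketFactory sslFactory = (SSLSocketFactory) SSLSocketFactory.getDefault();
        return sslFactory.createSocket(socket, host, port, autoClose);
    }
}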
14,668
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromeos/services/libassistant/abortable_task_list.h" #include <algorithm> namespace chromeos { namespace libassistant { AbortableTaskList::AbortableTaskList() = default; AbortableTaskList::~AbortableTaskList() { AbortAll(); } void AbortableTaskList::AddInternal(std::unique_ptr<AbortableTask> task) { // We cleanup finished tasks when a new task is added. RemoveFinishedTasks(); tasks_.push_back(std::move(task)); } void AbortableTaskList::AbortAll() { // Cancel all tasks that are not finished yet. for (auto& task : tasks_) { if (!task->IsFinished()) task->Abort(); } tasks_.clear(); } void AbortableTaskList::RemoveFinishedTasks() { tasks_.erase(std::remove_if(tasks_.begin(), tasks_.end(), [](const std::unique_ptr<AbortableTask>& task) { return task->IsFinished(); }), tasks_.end()); } } // namespace libassistant } // namespace chromeos
457
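The list above follows a simple pattern: prune finished tasks lazily whenever a new one is added, and on shutdown abort only the tasks that have not finished. A minimal Python sketch of the same bookkeeping, with names mirroring the C++ purely for illustration:

from typing import List, Protocol

class AbortableTask(Protocol):
    def is_finished(self) -> bool: ...
    def abort(self) -> None: ...

class AbortableTaskList:
    def __init__(self) -> None:
        self._tasks: List[AbortableTask] = []

    def add(self, task: AbortableTask) -> AbortableTask:
        # Finished tasks are pruned lazily, whenever a new task is added.
        self._remove_finished_tasks()
        self._tasks.append(task)
        return task

    def abort_all(self) -> None:
        # Abort only the tasks that have not finished yet, then drop everything.
        for task in self._tasks:
            if not task.is_finished():
                task.abort()
        self._tasks.clear()

    def _remove_finished_tasks(self) -> None:
        self._tasks = [t for t in self._tasks if not t.is_finished()]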
573
#!/usr/bin/env python3 import os import sys import runpy assert len(sys.argv) > 1 sys.argv = sys.argv[1:] sys.path.insert(0, os.path.dirname(sys.argv[0])) runpy.run_path(sys.argv[0], run_name='__main__')
93
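The wrapper simply shifts its own name out of sys.argv, puts the target script's directory at the front of sys.path, and re-executes the target as __main__. A self-contained demonstration of that mechanism, using a throwaway script written to a temporary directory (the file and flag names are made up):

import os
import runpy
import sys
import tempfile

with tempfile.TemporaryDirectory() as d:
    target = os.path.join(d, "demo.py")
    with open(target, "w") as f:
        f.write("import sys; print('argv seen by target:', sys.argv)\n")

    # Mimic the wrapper: argv[0] becomes the target script and its directory leads sys.path.
    sys.argv = [target, "--flag", "value"]
    sys.path.insert(0, os.path.dirname(target))
    runpy.run_path(target, run_name="__main__")
    # prints: argv seen by target: ['/tmp/.../demo.py', '--flag', 'value']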
311
/** * Copyright 2019 The JoyQueue Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.joyqueue.toolkit.config; import org.joyqueue.toolkit.config.annotation.Binding; import org.joyqueue.toolkit.reflect.Reflect; import org.joyqueue.toolkit.reflect.ReflectException; import java.lang.annotation.Annotation; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.util.Collection; import java.util.Date; import java.util.Map; /** * 对象绑定 * Created by hexiaofeng on 16-8-29. */ public class BindingBinder implements Binder { public static final BindingBinder INSTANCE = new BindingBinder(); @Override public void bind(final Field field, final Annotation annotation, final Object target, final Context context) throws ReflectException { if (field == null || annotation == null || target == null || context == null || !(annotation instanceof Binding)) { return; } // 去掉基本类型 Class type = field.getType(); if (!isSupported(type)) { return; } else { Object value = Reflect.get(field, target); if (type.isArray()) { // 数组 int length = Array.getLength(value); Object obj; for (int i = 0; i < length; i++) { obj = Array.get(value, i); if (obj != null && isSupported(obj.getClass())) { Binders.bind(context, obj); } } } else if (Collection.class.isAssignableFrom(type)) { // 集合 for (Object obj : (Collection) value) { if (obj != null && isSupported(obj.getClass())) { Binders.bind(context, obj); } } } else { Binders.bind(context, value); } } } /** * 是否支持 * * @param type 类型 * @return 基本类型标示 */ protected boolean isSupported(final Class type) { if (type == int.class) { return false; } else if (type == long.class) { return false; } else if (type == double.class) { return false; } else if (type == short.class) { return false; } else if (type == byte.class) { return false; } else if (type == boolean.class) { return false; } else if (Number.class.isAssignableFrom(type)) { return false; } else if (type == Boolean.class) { return false; } else if (type == String.class) { return false; } else if (type == Object.class) { return false; } else if (Date.class.isAssignableFrom(type)) { return false; } else return !Map.class.isAssignableFrom(type); } }
1,570
5,535
<reponame>bradfordb-vmware/gpdb from mock import * from .gp_unittest import * from gppylib.system.info import * class InfoTestCase(GpTestCase): def setUp(self): self.vmem = Mock() self.vmem.available = 123 * MB STACK_SIZE = 8 * MB self.apply_patches([ patch("psutil.virtual_memory", return_value=self.vmem), patch("resource.getrlimit", return_value=[STACK_SIZE, 0]) ]) def test_automatic_thread_count(self): self.assertEqual(get_max_available_thread_count(), 3) def test_automatic_thread_minimum(self): self.vmem.available = 123 self.assertEqual(get_max_available_thread_count(), 1) if __name__ == '__main__': run_tests()
318
3,579
<gh_stars>1000+ /* * Copyright 2015, The Querydsl Team (http://www.querydsl.com/team) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.querydsl.jpa; import javax.persistence.Entity; import com.querydsl.core.JoinType; import com.querydsl.core.QueryMetadata; import com.querydsl.core.support.CollectionAnyVisitor; import com.querydsl.core.support.Context; import com.querydsl.core.types.*; import com.querydsl.core.types.dsl.EntityPathBase; import com.querydsl.core.types.dsl.Expressions; import com.querydsl.core.types.dsl.SimplePath; /** * {@code JPACollectionAnyVisitor} extends the {@link CollectionAnyVisitor} class with module specific * extensions * * @author tiwe * */ class JPACollectionAnyVisitor extends CollectionAnyVisitor { @SuppressWarnings("unchecked") @Override protected Predicate exists(Context c, Predicate condition) { JPAQueryMixin<?> query = new JPAQueryMixin<Object>(); query.setProjection(Expressions.ONE); for (int i = 0; i < c.paths.size(); i++) { Path<?> child = c.paths.get(i).getMetadata().getParent(); EntityPath<Object> replacement = (EntityPath<Object>) c.replacements.get(i); if (c.paths.get(i).getType().isAnnotationPresent(Entity.class)) { query.addJoin(i == 0 ? JoinType.DEFAULT : JoinType.INNERJOIN, Expressions.as( Expressions.listPath((Class) c.paths.get(i).getType(), SimplePath.class, child.getMetadata()), replacement)); } else { // join via parent Path<?> parent = child.getMetadata().getParent(); EntityPathBase<Object> newParent = new EntityPathBase<Object>(parent.getType(), ExpressionUtils.createRootVariable(parent, Math.abs(condition.hashCode()))); EntityPath<Object> newChild = new EntityPathBase<Object>(child.getType(), PathMetadataFactory.forProperty(newParent, child.getMetadata().getName())); query.from(newParent); query.addJoin(JoinType.INNERJOIN, Expressions.as(newChild, replacement)); query.where(ExpressionUtils.eq(newParent, parent)); } } c.clear(); query.where(condition); return ExpressionUtils.predicate(Ops.EXISTS, asExpression(query.getMetadata())); } private Expression<?> asExpression(QueryMetadata metadata) { return new SubQueryExpressionImpl<Object>(metadata.getProjection().getType(), metadata); } }
1,189
719
<filename>pytorch/pytorch_edgeml/graph/bonsai.py<gh_stars>100-1000 # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT license. import torch import torch.nn as nn import numpy as np class Bonsai(nn.Module): def __init__(self, numClasses, dataDimension, projectionDimension, treeDepth, sigma, W=None, T=None, V=None, Z=None): super(Bonsai, self).__init__() ''' Expected Dimensions: Bonsai Params // Optional W [numClasses*totalNodes, projectionDimension] V [numClasses*totalNodes, projectionDimension] Z [projectionDimension, dataDimension + 1] T [internalNodes, projectionDimension] internalNodes = 2**treeDepth - 1 totalNodes = 2*internalNodes + 1 sigma - tanh non-linearity sigmaI - Indicator function for node probabilities sigmaI - has to be set to infinity(1e9 for practice) while doing testing/inference numClasses will be reset to 1 in binary case ''' self.dataDimension = dataDimension self.projectionDimension = projectionDimension if numClasses == 2: self.numClasses = 1 else: self.numClasses = numClasses self.treeDepth = treeDepth self.sigma = sigma self.internalNodes = 2**self.treeDepth - 1 self.totalNodes = 2 * self.internalNodes + 1 self.W = self.initW(W) self.V = self.initV(V) self.T = self.initT(T) self.Z = self.initZ(Z) self.assertInit() def initZ(self, Z): if Z is None: Z = torch.randn([self.projectionDimension, self.dataDimension]) Z = nn.Parameter(Z) else: Z = torch.from_numpy(Z.astype(np.float32)) Z = nn.Parameter(Z) return Z def initW(self, W): if W is None: W = torch.randn( [self.numClasses * self.totalNodes, self.projectionDimension]) W = nn.Parameter(W) else: W = torch.from_numpy(W.astype(np.float32)) W = nn.Parameter(W) return W def initV(self, V): if V is None: V = torch.randn( [self.numClasses * self.totalNodes, self.projectionDimension]) V = nn.Parameter(V) else: V = torch.from_numpy(V.astype(np.float32)) V = nn.Parameter(V) return V def initT(self, T): if T is None: T = torch.randn([self.internalNodes, self.projectionDimension]) T = nn.Parameter(T) else: T = torch.from_numpy(T.astype(np.float32)) T = nn.Parameter(T) return T def forward(self, X, sigmaI): ''' Function to build/exxecute the Bonsai Tree graph Expected Dimensions X is [batchSize, self.dataDimension] sigmaI is constant ''' X_ = torch.matmul(self.Z, torch.t(X)) / self.projectionDimension W_ = self.W[0:(self.numClasses)] V_ = self.V[0:(self.numClasses)] self.__nodeProb = [] self.__nodeProb.append(1) score_ = self.__nodeProb[0] * (torch.matmul(W_, X_) * torch.tanh(self.sigma * torch.matmul(V_, X_))) for i in range(1, self.totalNodes): W_ = self.W[i * self.numClasses:((i + 1) * self.numClasses)] V_ = self.V[i * self.numClasses:((i + 1) * self.numClasses)] T_ = torch.reshape(self.T[int(np.ceil(i / 2.0) - 1.0)], [-1, self.projectionDimension]) prob = (1 + ((-1)**(i + 1)) * torch.tanh(sigmaI * torch.matmul(T_, X_))) prob = prob / 2.0 prob = self.__nodeProb[int(np.ceil(i / 2.0) - 1.0)] * prob self.__nodeProb.append(prob) score_ += self.__nodeProb[i] * (torch.matmul(W_, X_) * torch.tanh(self.sigma * torch.matmul(V_, X_))) self.score = score_ self.X_ = X_ return torch.t(self.score), self.X_ def assertInit(self): errRank = "All Parameters must has only two dimensions shape = [a, b]" assert len(self.W.shape) == len(self.Z.shape), errRank assert len(self.W.shape) == len(self.T.shape), errRank assert len(self.W.shape) == 2, errRank msg = "W and V should be of same Dimensions" assert self.W.shape == self.V.shape, msg errW = "W and V are [numClasses*totalNodes, projectionDimension]" assert 
self.W.shape[0] == self.numClasses * self.totalNodes, errW assert self.W.shape[1] == self.projectionDimension, errW errZ = "Z is [projectionDimension, dataDimension]" assert self.Z.shape[0] == self.projectionDimension, errZ assert self.Z.shape[1] == self.dataDimension, errZ errT = "T is [internalNodes, projectionDimension]" assert self.T.shape[0] == self.internalNodes, errT assert self.T.shape[1] == self.projectionDimension, errT assert int(self.numClasses) > 0, "numClasses should be > 0" msg = "# of features in data should be > 0" assert int(self.dataDimension) > 0, msg msg = "Projection should be > 0 dims" assert int(self.projectionDimension) > 0, msg msg = "treeDepth should be >= 0" assert int(self.treeDepth) >= 0, msg
2,790
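For readers who want the tree arithmetic without the PyTorch plumbing, here is a NumPy re-statement of the forward pass above (inference only). The function name and the NumPy port are mine; the math mirrors the code: sigma_i is the large constant used at test time, and the parent of node i is ceil(i/2) - 1 exactly as in the loop.

import numpy as np

def bonsai_score(x, Z, W, V, T, num_classes, tree_depth, sigma, sigma_i=1e9):
    """Score a batch x of shape [batch, dataDim]; returns [batch, num_classes]."""
    internal_nodes = 2 ** tree_depth - 1
    total_nodes = 2 * internal_nodes + 1
    proj_dim = Z.shape[0]

    x_proj = (Z @ x.T) / proj_dim                        # [projDim, batch], as in the forward pass
    node_prob = [np.ones((1, x.shape[0]))]               # root probability is 1 for every sample

    w0, v0 = W[:num_classes], V[:num_classes]
    score = node_prob[0] * (w0 @ x_proj) * np.tanh(sigma * (v0 @ x_proj))

    for i in range(1, total_nodes):
        parent = int(np.ceil(i / 2.0) - 1)               # same parent indexing as the loop above
        t_parent = T[parent][None, :]                    # [1, projDim]
        branch = (1 + ((-1) ** (i + 1)) * np.tanh(sigma_i * (t_parent @ x_proj))) / 2.0
        node_prob.append(node_prob[parent] * branch)

        w_i = W[i * num_classes:(i + 1) * num_classes]
        v_i = V[i * num_classes:(i + 1) * num_classes]
        score += node_prob[i] * (w_i @ x_proj) * np.tanh(sigma * (v_i @ x_proj))

    return score.T                                       # [batch, num_classes]

# Example: a depth-2 tree with random parameters on 4 random samples.
rng = np.random.default_rng(0)
d, p, c, depth = 16, 8, 3, 2
n_total = 2 * (2 ** depth - 1) + 1
out = bonsai_score(rng.standard_normal((4, d)), rng.standard_normal((p, d)),
                   rng.standard_normal((c * n_total, p)), rng.standard_normal((c * n_total, p)),
                   rng.standard_normal((2 ** depth - 1, p)), c, depth, sigma=1.0)
assert out.shape == (4, 3)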
14,668
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_DOWNLOAD_PUBLIC_COMMON_DOWNLOAD_FILE_H_ #define COMPONENTS_DOWNLOAD_PUBLIC_COMMON_DOWNLOAD_FILE_H_ #include <stdint.h> #include <memory> #include <string> #include "base/callback_forward.h" #include "base/files/file_path.h" #include "build/build_config.h" #include "components/download/public/common/base_file.h" #include "components/download/public/common/download_export.h" #include "components/download/public/common/download_interrupt_reasons.h" #include "components/download/public/common/download_item.h" #include "components/download/public/common/input_stream.h" #include "components/services/quarantine/public/mojom/quarantine.mojom.h" #include "mojo/public/cpp/bindings/pending_remote.h" #include "mojo/public/cpp/system/data_pipe.h" class GURL; namespace download { // These objects live exclusively on the download sequence and handle the // writing operations for one download. These objects live only for the duration // that the download is 'in progress': once the download has been completed or // cancelled, the DownloadFile is destroyed. class COMPONENTS_DOWNLOAD_EXPORT DownloadFile { public: // Callback used with Initialize. // // On a successful initialize, |reason| = DOWNLOAD_INTERRUPT_REASON_NONE; // on a failed initialize, it will be set to the reason for the failure. // // In the case that the originally downloaded file had to be deleted, // |bytes_wasted| would be set to > 0. // // TODO(b/73967242): Change this to a OnceCallback. This is currently a // repeating callback because gMock does not support all built in actions for // move-only arguments (specifically SaveArg from download_item_impl_unittest. using InitializeCallback = base::RepeatingCallback<void(DownloadInterruptReason reason, int64_t bytes_wasted)>; // Callback used with Rename*(). On a successful rename |reason| will be // DOWNLOAD_INTERRUPT_REASON_NONE and |path| the path the rename // was done to. On a failed rename, |reason| will contain the // error. typedef base::OnceCallback<void(DownloadInterruptReason reason, const base::FilePath& path)> RenameCompletionCallback; // Used to drop the request, when the byte stream reader should be closed on // download sequence. typedef base::RepeatingCallback<void(int64_t offset)> CancelRequestCallback; virtual ~DownloadFile() {} // Upon completion, |initialize_callback| will be called on the UI // thread as per the comment above, passing DOWNLOAD_INTERRUPT_REASON_NONE // on success, or a network download interrupt reason on failure. virtual void Initialize( InitializeCallback initialize_callback, CancelRequestCallback cancel_request_callback, const DownloadItem::ReceivedSlices& received_slices) = 0; // Add an input stream to write into a slice of the file, used for // parallel download. virtual void AddInputStream(std::unique_ptr<InputStream> stream, int64_t offset) = 0; // Rename the download file to |full_path|. If that file exists // |full_path| will be uniquified by suffixing " (<number>)" to the // file name before the extension. virtual void RenameAndUniquify(const base::FilePath& full_path, RenameCompletionCallback callback) = 0; // Rename the download file to |full_path| and annotate it with // "Mark of the Web" information about its source. No uniquification // will be performed. // |remote_quarantine| must be connected to an instance of the Quarantine // service. 
In the unexpected case that |remote_quarantine| is invalid, or the // service otherwise fails, mark-of-the-web is manually applied as a fallback. virtual void RenameAndAnnotate( const base::FilePath& full_path, const std::string& client_guid, const GURL& source_url, const GURL& referrer_url, mojo::PendingRemote<quarantine::mojom::Quarantine> remote_quarantine, RenameCompletionCallback callback) = 0; // Detach the file so it is not deleted on destruction. virtual void Detach() = 0; // Abort the download and automatically close the file. virtual void Cancel() = 0; // Sets the potential file length. This is called when a half-open range // request fails or completes successfully. If the range request fails, the // file length should not be larger than the request's offset. If the range // request completes successfully, the file length can be determined by // the request offset and the bytes received. So |length| may not be the // actual file length, but it should not be smaller than it. virtual void SetPotentialFileLength(int64_t length) = 0; virtual const base::FilePath& FullPath() const = 0; virtual bool InProgress() const = 0; virtual void Pause() = 0; virtual void Resume() = 0; #if defined(OS_ANDROID) // Renames the download file to an intermediate URI. If current_path is a // content URI, it will be used for the renaming. Otherwise, A new // intermediate URI will be created to write the download file. Once // completes, |callback| is called with a content URI to be written into. virtual void RenameToIntermediateUri(const GURL& original_url, const GURL& referrer_url, const base::FilePath& file_name, const std::string& mime_type, const base::FilePath& current_path, RenameCompletionCallback callback) = 0; // Publishes the download to public. Once completes, |callback| is called with // the final content URI. virtual void PublishDownload(RenameCompletionCallback callback) = 0; // Returns the suggested file path from the system. virtual base::FilePath GetDisplayName() = 0; #endif // defined(OS_ANDROID) }; } // namespace download #endif // COMPONENTS_DOWNLOAD_PUBLIC_COMMON_DOWNLOAD_FILE_H_
2,054
2,151
<gh_stars>1000+ /* * Copyright (c) 2013 The Native Client Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #include <setjmp.h> #include <stdlib.h> #include <stdint.h> #include <stdio.h> extern "C" void try_block_test(); class SomeException { }; class ObjWithDtor { public: ~ObjWithDtor() { printf("[in ~ObjWithDtor()]\n"); } void no_op() { } }; // Stores x86-64's callee-saved registers (excluding r15). const int kRegsCount = 4; struct RegsState { uint64_t regs[kRegsCount]; }; struct RegsState g_expected_regs; struct RegsState g_actual_regs; jmp_buf g_jmp_buf; int g_test_unwind_resume = 0; int g_failed = 0; extern "C" void throw_some_exception() { if (g_test_unwind_resume) { ObjWithDtor obj; // Prevent warning about variable not being used. obj.no_op(); // This "throw" indirectly calls _Unwind_RaiseException(), which // passes control back to throw_some_exception(), which will: // * run the destructor for ObjWithDtor; // * call _Unwind_Resume(), which passes control back to // throw_some_exception()'s callee. throw SomeException(); } else { // This "throw" indirectly calls _Unwind_RaiseException(), which // will pass control back to throw_some_exception()'s callee. throw SomeException(); } } extern "C" void check_register_state() { for (int i = 0; i < kRegsCount; i++) { printf("register value: expected=0x%llx, actual=0x%llx\n", (long long) g_expected_regs.regs[i], (long long) g_actual_regs.regs[i]); if (g_expected_regs.regs[i] != g_actual_regs.regs[i]) { g_failed = 1; } } longjmp(g_jmp_buf, 1); } void run_test() { try { if (!setjmp(g_jmp_buf)) try_block_test(); } catch (SomeException &) { // This catch block should never be executed: the test longjmp()s // past it. Without this catch block, the unwinder will not see // any handlers for the exception and will abort before running // the assembly-code cleanup handler that calls // check_register_state(). abort(); } } int main() { printf("Testing restoring registers via _Unwind_RaiseException...\n"); run_test(); printf("Testing restoring registers via _Unwind_Resume...\n"); g_test_unwind_resume = 1; run_test(); return g_failed; }
913
577
package org.python.core; import java.io.Serializable; // XXX: isBaseType = false @Untraversable public class PyNotImplemented extends PySingleton implements Serializable { PyNotImplemented() { super("NotImplemented"); } public Object __tojava__(Class c) { // Danger here. java.lang.Object gets null not None if (c == PyObject.class) { return this; } if (c.isPrimitive()) { return Py.NoConversion; } return null; } public boolean isMappingType() { return false; } public boolean isSequenceType() { return false; } private Object writeReplace() { return new Py.SingletonResolver("NotImplemented"); } }
315
1,149
#ifndef _RL_WAL_H #define _RL_WAL_H int rl_write_apply_wal(rlite *db); int rl_write_wal(const char *wal_path, rlite *db, unsigned char **_data, size_t *_datalen); int rl_apply_wal(rlite *db); #endif
101
317
package com.googlecode.totallylazy.matchers; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; import org.junit.Test; import static com.googlecode.totallylazy.functions.Callables.returns; import static com.googlecode.totallylazy.matchers.LazyEqualsMatcher.lazyEqualTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNot.not; public class LazyEqualsMatcherTest { private static final String SIMPLE_DESCRIPTION_TEXT = "description"; private static final String EXPECTED = "myObject"; private static final String ACTUAL = EXPECTED; private static final String DIFFERENT_ACTUAL = "different"; private static final String FULL_DESCRIPTION_TEXT = SIMPLE_DESCRIPTION_TEXT + " [expected: " + EXPECTED + ", actual: " + DIFFERENT_ACTUAL + "]"; @Test public void shouldBeEqual() { assertThat(ACTUAL, lazyEqualTo(SIMPLE_DESCRIPTION_TEXT, returns(EXPECTED))); } @Test public void shouldNotBeEqual() { assertThat(DIFFERENT_ACTUAL, not(lazyEqualTo(SIMPLE_DESCRIPTION_TEXT, returns(EXPECTED)))); } @Test public void shouldDescribeEquality() { Description description = new StringDescription(); lazyEqualTo(SIMPLE_DESCRIPTION_TEXT, returns(EXPECTED)).describeTo(description); assertThat(description.toString(), is(SIMPLE_DESCRIPTION_TEXT)); } @Test public void shouldDescribeEqualityWithExpectedAndActual() { Description description = new StringDescription(); Matcher<String> matcher = lazyEqualTo(SIMPLE_DESCRIPTION_TEXT, returns(EXPECTED)); matcher.matches(DIFFERENT_ACTUAL); matcher.describeTo(description); assertThat(description.toString(), is(FULL_DESCRIPTION_TEXT)); } }
653
419
#include "PlayerAction_Jump.h" #include "Game/Core/Player/PlayerPhysicsController.h" #include "Game/Core/Player/Components/Component_MainPlayer.h" #include "Game/Core/Player/PlayerCameraController.h" #include "Game/Core/Player/PlayerAnimationController.h" #include "Game/Core/Player/GraphControllers/PlayerGraphController_Ability.h" #include "Engine/Physics/Components/Component_PhysicsCharacter.h" #include "System/Input/InputSystem.h" // hack for now #include "Game/Core/Player/GraphControllers/PlayerGraphController_Locomotion.h" //------------------------------------------------------------------------- namespace KRG::Player { static Radians g_maxAngularSpeed = Radians( Degrees( 90 ) ); // radians/second static float g_smallJumpDistance = 3.0f; // meters/second static float g_bigJumpDistance = 8.0f; // meters/second static float g_bigJumpEnergyCost = 1.0f; // energy levels static float g_gravityAcceleration = 30.0f; // meters/second squared static float g_maxAirControlAcceleration = 10.0f; // meters/second squared static float g_maxAirControlSpeed = 6.5f; // meters/second static Seconds g_bigJumpHoldTime = 0.3f; // seconds // 1.) V = Vi + a(t) // // 0 = Vi + a(t) V = 0 since we want to reach the apex, hence velocity 0. // Vi = -a(t) // // 2.) d = Vi(t) + 0.5(a)(t^2) // // d = -a(t)(t) + 0.5(a)(t^2) substitute vi = -a(t) from 1. // d = -a(t^2) + 0.5(a)(t^2) // d = a(t^2)(-1 + 0.5) // d = -0.5(a)(t^2) // t^2 = -2(d)/a // t = sqrt( -2(d)/a ) // // Vi = -a(t) back to using 1. now that we have t we can calculate Vi. // Vi = -a( sqrt( -2(d)/a ) ) the negative sign will disappear since our acceleration is negative static float g_bigJumpTimeToApex = Math::Sqrt( 2 * g_bigJumpDistance / ( g_gravityAcceleration ) ); static float g_bigJumpinitialSpeed = g_gravityAcceleration * g_bigJumpTimeToApex; static float g_smallJumpTimeToApex = Math::Sqrt( 2 * g_smallJumpDistance / ( g_gravityAcceleration ) ); static float g_smallJumpinitialSpeed = g_gravityAcceleration * g_smallJumpTimeToApex; //------------------------------------------------------------------------- bool JumpAction::TryStartInternal( ActionContext const& ctx ) { if( ctx.m_pInputState->GetControllerState()->WasReleased( Input::ControllerButton::FaceButtonDown ) ) { ctx.m_pAnimationController->SetCharacterState( CharacterAnimationState::Ability ); auto pAbilityAnimController = ctx.GetAnimSubGraphController<AbilityGraphController>(); pAbilityAnimController->StartJump(); ctx.m_pCharacterController->DisableGravity(); ctx.m_pCharacterController->DisableProjectionOntoFloor(); ctx.m_pCharacterController->EnableStepHeight(); m_jumpTimer.Start(); if( m_isChargedJumpReady ) { ctx.m_pPlayerComponent->ConsumeEnergy( g_bigJumpEnergyCost ); } m_previousHeight = 0.0f; return true; } else // Check hold state { m_isChargedJumpReady = false; Seconds jumpHoldTime = 0.0f; if( ctx.m_pInputState->GetControllerState()->IsHeldDown( Input::ControllerButton::FaceButtonDown, &jumpHoldTime ) ) { if( jumpHoldTime > g_bigJumpHoldTime && ctx.m_pPlayerComponent->HasEnoughEnergy( g_bigJumpEnergyCost ) ) { m_isChargedJumpReady = true; } } } return false; } Action::Status JumpAction::UpdateInternal( ActionContext const& ctx ) { // check if we had a collision //------------------------------------------------------------------------- float const jumpTime = ( m_isChargedJumpReady ? 
g_bigJumpTimeToApex : g_smallJumpTimeToApex ); if( m_jumpTimer.GetElapsedTimeSeconds() >= jumpTime ) { // Jump Completed return Status::Completed; } else if( m_jumpTimer.GetElapsedTimeSeconds() > 0.0f && ctx.m_pCharacterComponent->GetCharacterVelocity().m_z <= 0.0f ) { // Jump Collided with a over head collision return Status::Completed; } else { m_jumpTimer.Update( ctx.GetDeltaTime() ); float deltaHeight = ctx.m_pPlayerComponent->jumpCurve.Evaluate( m_jumpTimer.GetElapsedTimeSeconds() / jumpTime ) * ( m_isChargedJumpReady ? g_bigJumpDistance : g_smallJumpDistance ) - m_previousHeight; float verticalVelocity = deltaHeight / ctx.GetDeltaTime(); m_previousHeight += deltaHeight; auto const pControllerState = ctx.m_pInputState->GetControllerState(); KRG_ASSERT( pControllerState != nullptr ); // Calculate desired player displacement //------------------------------------------------------------------------- Vector const movementInputs = pControllerState->GetLeftAnalogStickValue(); auto const& camFwd = ctx.m_pCameraController->GetCameraRelativeForwardVector2D(); auto const& camRight = ctx.m_pCameraController->GetCameraRelativeRightVector2D(); // Use last frame camera orientation Vector const CurrentVelocity = ctx.m_pCharacterComponent->GetCharacterVelocity(); Vector const CurrentVelocity2D = CurrentVelocity * Vector( 1.0f, 1.0f, 0.0f ); float const CurrentSpeed2D = CurrentVelocity2D.GetLength2(); Vector const forward = camFwd * movementInputs.m_y; Vector const right = camRight * movementInputs.m_x; Vector const desiredHeadingVelocity2D = ( forward + right ) * g_maxAirControlAcceleration * ctx.GetDeltaTime(); Vector ResultingVelocity = CurrentVelocity2D + desiredHeadingVelocity2D; float const Length = ResultingVelocity.GetLength2(); if( Length > g_maxAirControlSpeed ) { ResultingVelocity = ResultingVelocity.GetNormalized2() * g_maxAirControlSpeed; } ResultingVelocity.m_z = verticalVelocity; Vector const Facing = desiredHeadingVelocity2D.IsZero2() ? ctx.m_pCharacterComponent->GetForwardVector() : desiredHeadingVelocity2D.GetNormalized2(); // Update animation controller //------------------------------------------------------------------------- auto pLocomotionGraphController = ctx.GetAnimSubGraphController<LocomotionGraphController>(); pLocomotionGraphController->SetLocomotionDesires( ctx.GetDeltaTime(), ResultingVelocity, Facing ); } return Status::Interruptible; } void JumpAction::StopInternal( ActionContext const& ctx, StopReason reason ) { } }
2,960
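The constants derived in the comment block, time to apex and initial vertical speed from a desired jump height and gravity, are easy to sanity-check numerically. A short Python verification using the same values, 3 m and 8 m jumps under 30 m/s^2:

import math

def jump_launch_parameters(jump_height: float, gravity: float = 30.0):
    """Return (time_to_apex, initial_speed) for a ballistic jump of the given height.

    Uses the magnitude form of the derivation in the comment block: d = 0.5 * a * t^2
    at the apex, so t = sqrt(2 * d / a) and the launch speed is v_i = a * t.
    """
    time_to_apex = math.sqrt(2.0 * jump_height / gravity)
    initial_speed = gravity * time_to_apex
    return time_to_apex, initial_speed

for name, height in (("small jump", 3.0), ("big jump", 8.0)):
    t, v = jump_launch_parameters(height)
    print(f"{name}: time to apex = {t:.3f} s, initial vertical speed = {v:.2f} m/s")
# small jump: time to apex = 0.447 s, initial vertical speed = 13.42 m/s
# big jump: time to apex = 0.730 s, initial vertical speed = 21.91 m/s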
590
/*! @file @author <NAME> @date 09/2010 */ #include "Precompiled.h" #include "SettingsGeneralControl.h" #include "SettingsManager.h" #include "FactoryManager.h" namespace tools { FACTORY_ITEM_ATTRIBUTE(SettingsGeneralControl) SettingsGeneralControl::SettingsGeneralControl() : mGridStep(0), mGridEdit(nullptr), mSaveLastTexture(nullptr), mInterfaceLanguage(nullptr) { } SettingsGeneralControl::~SettingsGeneralControl() { mSaveLastTexture->eventMouseButtonClick -= MyGUI::newDelegate(this, &SettingsGeneralControl::notifyMouseButtonClick); mGridEdit->eventEditSelectAccept -= MyGUI::newDelegate(this, &SettingsGeneralControl::notifyNewGridStepAccept); mGridEdit->eventKeyLostFocus -= MyGUI::newDelegate(this, &SettingsGeneralControl::notifyNewGridStep); } void SettingsGeneralControl::OnInitialise(Control* _parent, MyGUI::Widget* _place, const std::string& _layoutName) { Control::OnInitialise(_parent, _place, _layoutName); assignWidget(mGridEdit, "gridEdit"); assignWidget(mSaveLastTexture, "SaveLastTexture"); assignWidget(mInterfaceLanguage, "InterfaceLanguage"); mGridEdit->eventEditSelectAccept += MyGUI::newDelegate(this, &SettingsGeneralControl::notifyNewGridStepAccept); mGridEdit->eventKeyLostFocus += MyGUI::newDelegate(this, &SettingsGeneralControl::notifyNewGridStep); mSaveLastTexture->eventMouseButtonClick += MyGUI::newDelegate(this, &SettingsGeneralControl::notifyMouseButtonClick); } void SettingsGeneralControl::loadSettings() { mGridStep = SettingsManager::getInstance().getValue<int>("Settings/GridStep"); mGridEdit->setCaption(MyGUI::utility::toString(mGridStep)); mSaveLastTexture->setStateSelected(SettingsManager::getInstance().getValue<bool>("Settings/SaveLastTexture")); setLanguageValue(SettingsManager::getInstance().getValue("Settings/InterfaceLanguage")); } void SettingsGeneralControl::saveSettings() { SettingsManager::getInstance().setValue("Settings/GridStep", mGridStep); SettingsManager::getInstance().setValue("Settings/SaveLastTexture", mSaveLastTexture->getStateSelected()); SettingsManager::getInstance().setValue("Settings/InterfaceLanguage", getLanguageValue()); } void SettingsGeneralControl::notifyNewGridStep(MyGUI::Widget* _sender, MyGUI::Widget* _new) { mGridStep = MyGUI::utility::parseInt(mGridEdit->getOnlyText()); mGridStep = (std::max)(1, mGridStep); mGridEdit->setCaption(MyGUI::utility::toString(mGridStep)); } void SettingsGeneralControl::notifyNewGridStepAccept(MyGUI::EditBox* _sender) { notifyNewGridStep(_sender); } void SettingsGeneralControl::notifyMouseButtonClick(MyGUI::Widget* _sender) { MyGUI::Button* button = _sender->castType<MyGUI::Button>(false); if (button != nullptr) button->setStateSelected(!button->getStateSelected()); } void SettingsGeneralControl::setLanguageValue(const std::string& _value) { for (size_t index = 0; index < mInterfaceLanguage->getItemCount(); index ++) { if (mInterfaceLanguage->getItemNameAt(index) == _value) { mInterfaceLanguage->setIndexSelected(index); return; } } for (size_t index = 0; index < mInterfaceLanguage->getItemCount(); index ++) { if (mInterfaceLanguage->getItemNameAt(index) == "Auto") { mInterfaceLanguage->setIndexSelected(index); return; } } } std::string SettingsGeneralControl::getLanguageValue() { if (mInterfaceLanguage->getIndexSelected() == MyGUI::ITEM_NONE) return "Auto"; return mInterfaceLanguage->getItemNameAt(mInterfaceLanguage->getIndexSelected()); } void SettingsGeneralControl::OnCommand(const std::string& _command) { Control::OnCommand(_command); if (_command == "Command_LoadSettings") loadSettings(); else if 
(_command == "Command_SaveSettings") saveSettings(); } }
1,244
1,997
"""Services module.""" import logging import sqlite3 from typing import Dict from mypy_boto3_s3 import S3Client class BaseService: def __init__(self) -> None: self.logger = logging.getLogger( f'{__name__}.{self.__class__.__name__}', ) class UserService(BaseService): def __init__(self, db: sqlite3.Connection) -> None: self.db = db super().__init__() def get_user(self, email: str) -> Dict[str, str]: self.logger.debug('User %s has been found in database', email) return {'email': email, 'password_hash': '...'} class AuthService(BaseService): def __init__(self, db: sqlite3.Connection, token_ttl: int) -> None: self.db = db self.token_ttl = token_ttl super().__init__() def authenticate(self, user: Dict[str, str], password: str) -> None: assert password is not None self.logger.debug( 'User %s has been successfully authenticated', user['email'], ) class PhotoService(BaseService): def __init__(self, db: sqlite3.Connection, s3: S3Client) -> None: self.db = db self.s3 = s3 super().__init__() def upload_photo(self, user: Dict[str, str], photo_path: str) -> None: self.logger.debug( 'Photo %s has been successfully uploaded by user %s', photo_path, user['email'], )
623
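A hypothetical wiring of these services, useful for seeing the constructor contracts in one place. The in-memory SQLite connection, the module name services, the e-mail address, and the plain-text password are placeholders; PhotoService is omitted because it additionally needs a boto3 S3 client.

import logging
import sqlite3

from services import AuthService, UserService  # assumes the module above is saved as services.py

logging.basicConfig(level=logging.DEBUG)

db = sqlite3.connect(":memory:")
user_service = UserService(db=db)
auth_service = AuthService(db=db, token_ttl=3600)

user = user_service.get_user(email="user@example.com")  # placeholder e-mail
auth_service.authenticate(user, password="secret")      # placeholder password; logs success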
14,668
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.components.paintpreview.player; /** * Interface for handling overscroll events in the player. */ public interface OverscrollHandler { /** * Used to start an overscroll event. Returns true if it is able to be created/consumed. */ boolean start(); /** * Updates the overscroll amount. * * @param yDelta The change in overscroll amount. Positive values indicate more overscrolling. */ void pull(float yDelta); /** * Releases the overscroll event. This will trigger a refresh if a sufficient number and * distance of {@link #pull} calls occurred. */ void release(); /** * Resets the overscroll event if it was aborted. */ void reset(); }
301
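A rough Python model of the lifecycle the interface implies, start, zero or more pull calls, then release or reset. It is a toy sketch only: the real player decides what counts as sufficient overscroll and release() returns nothing, while the 200-pixel threshold and the boolean return here are assumptions for illustration.

class PullToRefreshModel:
    """Toy state machine mirroring OverscrollHandler: start -> pull* -> release/reset."""

    REFRESH_THRESHOLD = 200.0  # assumed value, in scaled pixels

    def __init__(self) -> None:
        self.active = False
        self.total_pull = 0.0

    def start(self) -> bool:
        # Returns True if the overscroll event can be created/consumed.
        self.active = True
        self.total_pull = 0.0
        return True

    def pull(self, y_delta: float) -> None:
        if self.active:
            self.total_pull += y_delta  # positive deltas mean more overscroll

    def release(self) -> bool:
        # Trigger a refresh only if enough overscroll accumulated before release.
        should_refresh = self.active and self.total_pull >= self.REFRESH_THRESHOLD
        self.reset()
        return should_refresh

    def reset(self) -> None:
        self.active = False
        self.total_pull = 0.0

handler = PullToRefreshModel()
handler.start()
for _ in range(5):
    handler.pull(50.0)
assert handler.release() is True  # 250 >= 200, so a refresh would be triggered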
2,151
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "net/http/http_proxy_client_socket_wrapper.h" #include <cstdio> #include <memory> #include "build/build_config.h" #include "net/cert/ct_policy_enforcer.h" #include "net/cert/do_nothing_ct_verifier.h" #include "net/cert/mock_cert_verifier.h" #include "net/dns/mock_host_resolver.h" #include "net/http/http_auth_cache.h" #include "net/http/http_auth_handler_factory.h" #include "net/http/http_server_properties_impl.h" #include "net/http/transport_security_state.h" #include "net/quic/chromium/mock_crypto_client_stream_factory.h" #include "net/quic/chromium/mock_quic_data.h" #include "net/quic/chromium/quic_http_utils.h" #include "net/quic/chromium/quic_test_packet_maker.h" #include "net/socket/socket_tag.h" #include "net/socket/socket_test_util.h" #include "net/ssl/channel_id_service.h" #include "net/ssl/default_channel_id_store.h" #include "net/test/cert_test_util.h" #include "net/test/gtest_util.h" #include "net/test/test_data_directory.h" #include "net/test/test_with_scoped_task_environment.h" #include "net/third_party/quic/core/quic_versions.h" #include "net/third_party/quic/test_tools/mock_clock.h" #include "net/third_party/quic/test_tools/mock_random.h" #include "net/traffic_annotation/network_traffic_annotation_test_helper.h" #include "testing/gtest/include/gtest/gtest.h" namespace net { namespace { const char kProxyHost[] = "proxy.example.org"; const int kProxyPort = 6121; const char kOriginHost[] = "www.google.org"; const int kOriginPort = 443; const char kUserAgent[] = "Mozilla/1.0"; const QuicStreamId kClientDataStreamId1 = kHeadersStreamId + 2; class MockSSLConfigService : public SSLConfigService { public: MockSSLConfigService() = default; void GetSSLConfig(SSLConfig* config) override { *config = config_; } private: ~MockSSLConfigService() override = default; SSLConfig config_; }; }; // namespace namespace test { class HttpProxyClientSocketWrapperTest : public ::testing::TestWithParam<std::tuple<QuicTransportVersion, bool>>, public WithScopedTaskEnvironment { protected: static const bool kFin = true; static const bool kIncludeVersion = true; static const bool kSendFeedback = true; HttpProxyClientSocketWrapperTest() : proxy_host_port_(kProxyHost, kProxyPort), endpoint_host_port_(kOriginHost, kOriginPort), ssl_config_service_(new MockSSLConfigService()), cert_verifier_(new MockCertVerifier()), channel_id_service_( new ChannelIDService(new DefaultChannelIDStore(nullptr))), cert_transparency_verifier_(new DoNothingCTVerifier()), random_generator_(0), quic_version_(std::get<0>(GetParam())), client_headers_include_h2_stream_dependency_(std::get<1>(GetParam())), client_maker_(quic_version_, 0, &clock_, kProxyHost, Perspective::IS_CLIENT, client_headers_include_h2_stream_dependency_), server_maker_(quic_version_, 0, &clock_, kProxyHost, Perspective::IS_SERVER, false), header_stream_offset_(0), response_offset_(0), store_server_configs_in_properties_(false), idle_connection_timeout_seconds_(kIdleConnectionTimeoutSeconds), reduced_ping_timeout_seconds_(kPingTimeoutSecs), migrate_sessions_on_network_change_(false), migrate_sessions_early_(false), migrate_sessions_on_network_change_v2_(false), migrate_sessions_early_v2_(false), allow_server_migration_(false), race_cert_verification_(false), estimate_initial_rtt_(false), quic_stream_factory_(nullptr), privacy_mode_(PRIVACY_MODE_DISABLED), http_auth_handler_factory_( 
HttpAuthHandlerFactory::CreateDefault(&host_resolver_)), client_socket_wrapper_(nullptr) { clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1)); // why is this here??? } void Initialize() { DCHECK(!quic_stream_factory_); quic_stream_factory_.reset(new QuicStreamFactory( net_log_.net_log(), &host_resolver_, ssl_config_service_.get(), &socket_factory_, &http_server_properties_, cert_verifier_.get(), &ct_policy_enforcer_, channel_id_service_.get(), &transport_security_state_, cert_transparency_verifier_.get(), /*SocketPerformanceWatcherFactory=*/nullptr, &crypto_client_stream_factory_, &random_generator_, &clock_, kDefaultMaxPacketSize, /*user_agent_id=*/kUserAgent, store_server_configs_in_properties_, /*close_sessions_on_ip_change=*/true, /*mark_quic_broken_when_network_blackholes=*/false, idle_connection_timeout_seconds_, reduced_ping_timeout_seconds_, /*max_time_before_crypto_handshake_seconds=*/ kMaxTimeForCryptoHandshakeSecs, /*max_idle_time_before_crypto_handshake_seconds=*/ kInitialIdleTimeoutSecs, migrate_sessions_on_network_change_, migrate_sessions_early_, migrate_sessions_on_network_change_v2_, migrate_sessions_early_v2_, base::TimeDelta::FromSeconds(kMaxTimeOnNonDefaultNetworkSecs), kMaxMigrationsToNonDefaultNetworkOnPathDegrading, allow_server_migration_, race_cert_verification_, estimate_initial_rtt_, client_headers_include_h2_stream_dependency_, connection_options_, client_connection_options_, /*enable_token_binding=*/false, /*enable_channel_id=*/false, /*enable_socket_recv_optimization=*/false)); } void PopulateConnectRequestIR(spdy::SpdyHeaderBlock* block) { (*block)[":method"] = "CONNECT"; (*block)[":authority"] = endpoint_host_port_.ToString(); (*block)["user-agent"] = kUserAgent; } std::unique_ptr<QuicReceivedPacket> ConstructSettingsPacket( QuicPacketNumber packet_number) { return client_maker_.MakeInitialSettingsPacket(packet_number, &header_stream_offset_); } std::unique_ptr<QuicReceivedPacket> ConstructConnectRequestPacket( QuicPacketNumber packet_number) { spdy::SpdyHeaderBlock block; PopulateConnectRequestIR(&block); return client_maker_.MakeRequestHeadersPacket( packet_number, kClientDataStreamId1, kIncludeVersion, !kFin, ConvertRequestPriorityToQuicPriority(DEFAULT_PRIORITY), std::move(block), 0, nullptr, &header_stream_offset_); } std::unique_ptr<QuicReceivedPacket> ConstructServerConnectReplyPacket( QuicPacketNumber packet_number, bool fin) { spdy::SpdyHeaderBlock block; block[":status"] = "200"; return server_maker_.MakeResponseHeadersPacket( packet_number, kClientDataStreamId1, !kIncludeVersion, fin, std::move(block), nullptr, &response_offset_); } std::unique_ptr<QuicReceivedPacket> ConstructAckAndRstPacket( QuicPacketNumber packet_number, QuicRstStreamErrorCode error_code, QuicPacketNumber largest_received, QuicPacketNumber smallest_received, QuicPacketNumber least_unacked) { return client_maker_.MakeAckAndRstPacket( packet_number, !kIncludeVersion, kClientDataStreamId1, error_code, largest_received, smallest_received, least_unacked, kSendFeedback); } static ProofVerifyDetailsChromium DefaultProofVerifyDetails() { // Load a certificate that is valid for *.example.org scoped_refptr<X509Certificate> test_cert( ImportCertFromFile(GetTestCertsDirectory(), "wildcard.pem")); EXPECT_TRUE(test_cert.get()); ProofVerifyDetailsChromium verify_details; verify_details.cert_verify_result.verified_cert = test_cert; verify_details.cert_verify_result.is_issued_by_known_root = true; return verify_details; } HostPortPair proxy_host_port_; HostPortPair endpoint_host_port_; 
MockClock clock_; MockQuicData mock_quic_data_; // QuicStreamFactory environment NetLogWithSource net_log_; MockHostResolver host_resolver_; scoped_refptr<SSLConfigService> ssl_config_service_; MockTaggingClientSocketFactory socket_factory_; HttpServerPropertiesImpl http_server_properties_; std::unique_ptr<MockCertVerifier> cert_verifier_; DefaultCTPolicyEnforcer ct_policy_enforcer_; std::unique_ptr<ChannelIDService> channel_id_service_; TransportSecurityState transport_security_state_; std::unique_ptr<DoNothingCTVerifier> cert_transparency_verifier_; MockCryptoClientStreamFactory crypto_client_stream_factory_; MockRandom random_generator_; const QuicTransportVersion quic_version_; const bool client_headers_include_h2_stream_dependency_; QuicTestPacketMaker client_maker_; QuicTestPacketMaker server_maker_; QuicStreamOffset header_stream_offset_; QuicStreamOffset response_offset_; // Variables to configure QuicStreamFactory. bool store_server_configs_in_properties_; int idle_connection_timeout_seconds_; int reduced_ping_timeout_seconds_; bool migrate_sessions_on_network_change_; bool migrate_sessions_early_; bool migrate_sessions_on_network_change_v2_; bool migrate_sessions_early_v2_; bool allow_server_migration_; bool race_cert_verification_; bool estimate_initial_rtt_; QuicTagVector connection_options_; QuicTagVector client_connection_options_; std::unique_ptr<QuicStreamFactory> quic_stream_factory_; // HttpProxyClientSocketWrapper environment PrivacyMode privacy_mode_; HttpAuthCache http_auth_cache_; std::unique_ptr<HttpAuthHandlerRegistryFactory> http_auth_handler_factory_; std::unique_ptr<HttpProxyClientSocketWrapper> client_socket_wrapper_; }; TEST_P(HttpProxyClientSocketWrapperTest, QuicProxy) { Initialize(); ProofVerifyDetailsChromium verify_details = DefaultProofVerifyDetails(); crypto_client_stream_factory_.AddProofVerifyDetails(&verify_details); mock_quic_data_.AddWrite(SYNCHRONOUS, ConstructSettingsPacket(1)); mock_quic_data_.AddWrite(SYNCHRONOUS, ConstructConnectRequestPacket(2)); mock_quic_data_.AddRead(ASYNC, ConstructServerConnectReplyPacket(1, !kFin)); mock_quic_data_.AddRead(SYNCHRONOUS, ERR_IO_PENDING); mock_quic_data_.AddWrite( SYNCHRONOUS, ConstructAckAndRstPacket(3, QUIC_STREAM_CANCELLED, 1, 1, 1)); mock_quic_data_.AddSocketDataToFactory(&socket_factory_); scoped_refptr<TransportSocketParams> transport_params = new TransportSocketParams( proxy_host_port_, false, OnHostResolutionCallback(), TransportSocketParams::COMBINE_CONNECT_AND_WRITE_DEFAULT); scoped_refptr<SSLSocketParams> ssl_params = new SSLSocketParams(transport_params, nullptr, nullptr, proxy_host_port_, SSLConfig(), privacy_mode_, 0); transport_params = nullptr; client_socket_wrapper_.reset(new HttpProxyClientSocketWrapper( /*group_name=*/std::string(), /*requiest_priority=*/DEFAULT_PRIORITY, /*socket_tag=*/SocketTag(), /*respect_limits=*/ClientSocketPool::RespectLimits::DISABLED, /*connect_timeout_duration=*/base::TimeDelta::FromHours(1), /*proxy_negotiation_timeout_duration=*/base::TimeDelta::FromHours(1), /*transport_pool=*/nullptr, /*ssl_pool=*/nullptr, /*transport_params=*/nullptr, ssl_params, quic_version_, kUserAgent, endpoint_host_port_, &http_auth_cache_, http_auth_handler_factory_.get(), /*spdy_session_pool=*/nullptr, quic_stream_factory_.get(), /*is_trusted_proxy=*/false, /*tunnel=*/true, TRAFFIC_ANNOTATION_FOR_TESTS, net_log_)); TestCompletionCallback callback; client_socket_wrapper_->Connect(callback.callback()); EXPECT_THAT(callback.WaitForResult(), IsOk()); client_socket_wrapper_.reset(); 
EXPECT_TRUE(mock_quic_data_.AllReadDataConsumed()); EXPECT_TRUE(mock_quic_data_.AllWriteDataConsumed()); } // Test that the SocketTag is appropriately applied to the underlying socket // for QUIC proxies. #if defined(OS_ANDROID) TEST_P(HttpProxyClientSocketWrapperTest, QuicProxySocketTag) { Initialize(); ProofVerifyDetailsChromium verify_details = DefaultProofVerifyDetails(); crypto_client_stream_factory_.AddProofVerifyDetails(&verify_details); mock_quic_data_.AddWrite(SYNCHRONOUS, ConstructSettingsPacket(1)); mock_quic_data_.AddWrite(SYNCHRONOUS, ConstructConnectRequestPacket(2)); mock_quic_data_.AddRead(ASYNC, ConstructServerConnectReplyPacket(1, !kFin)); mock_quic_data_.AddRead(SYNCHRONOUS, ERR_IO_PENDING); mock_quic_data_.AddWrite( SYNCHRONOUS, ConstructAckAndRstPacket(3, QUIC_STREAM_CANCELLED, 1, 1, 1)); mock_quic_data_.AddSocketDataToFactory(&socket_factory_); scoped_refptr<TransportSocketParams> transport_params = new TransportSocketParams( proxy_host_port_, false, OnHostResolutionCallback(), TransportSocketParams::COMBINE_CONNECT_AND_WRITE_DEFAULT); scoped_refptr<SSLSocketParams> ssl_params = new SSLSocketParams(transport_params, nullptr, nullptr, proxy_host_port_, SSLConfig(), privacy_mode_, 0); transport_params = nullptr; SocketTag tag(getuid(), 0x87654321); client_socket_wrapper_.reset(new HttpProxyClientSocketWrapper( /*group_name=*/std::string(), /*requiest_priority=*/DEFAULT_PRIORITY, /*socket_tag=*/tag, /*respect_limits=*/ClientSocketPool::RespectLimits::DISABLED, /*connect_timeout_duration=*/base::TimeDelta::FromHours(1), /*proxy_negotiation_timeout_duration=*/base::TimeDelta::FromHours(1), /*transport_pool=*/nullptr, /*ssl_pool=*/nullptr, /*transport_params=*/nullptr, ssl_params, quic_version_, kUserAgent, endpoint_host_port_, &http_auth_cache_, http_auth_handler_factory_.get(), /*spdy_session_pool=*/nullptr, quic_stream_factory_.get(), /*is_trusted_proxy=*/false, /*tunnel=*/true, TRAFFIC_ANNOTATION_FOR_TESTS, net_log_)); TestCompletionCallback callback; client_socket_wrapper_->Connect(callback.callback()); EXPECT_THAT(callback.WaitForResult(), IsOk()); EXPECT_EQ(socket_factory_.GetLastProducedUDPSocket()->tag(), tag); EXPECT_TRUE(socket_factory_.GetLastProducedUDPSocket() ->tagged_before_data_transferred()); client_socket_wrapper_.reset(); EXPECT_TRUE(mock_quic_data_.AllReadDataConsumed()); EXPECT_TRUE(mock_quic_data_.AllWriteDataConsumed()); } #endif INSTANTIATE_TEST_CASE_P( VersionIncludeStreamDependencySequence, HttpProxyClientSocketWrapperTest, ::testing::Combine(::testing::ValuesIn(AllSupportedTransportVersions()), ::testing::Bool())); }; // namespace test }; // namespace net
5,741
4,822
/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to * this file be licensed under the Apache-2.0 license or a * compatible open source license. */ /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. */ package org.opensearch.painless; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; public class GetByPathAugmentationTests extends ScriptTestCase { private final String k001Key = "k011"; private final String k001Value = "b"; private final Map<String, String> k001Obj = new HashMap<>(); private final String k001MapStr = "['" + k001Key + "': '" + k001Value + "']"; private final String mapMapList = "['k0': ['k01': [['k010': 'a'], " + k001MapStr + "]], 'k1': ['q']]"; private final String l2m2l1Index0 = "ll0"; private final String l2m2l1Index1 = "ll1"; private final List<String> l2m2l1Obj = new ArrayList<>(); private final String l2m2l1Str = "['" + l2m2l1Index0 + "', '" + l2m2l1Index1 + "']"; private final String listMapListList = "[['m0':'v0'],['m1':'v1'],['m2':['l0','l1', " + l2m2l1Str + "]]]"; private final String mapList = "['key0': ['a', 'b'], 'key1': ['c', 'd']]"; private final String mapMap = "['a': ['b': 'c']]"; public GetByPathAugmentationTests() { l2m2l1Obj.add(l2m2l1Index0); l2m2l1Obj.add(l2m2l1Index1); k001Obj.put(k001Key, k001Value); } private String toScript(String collection, String key) { return String.format(Locale.ROOT, "return %s.getByPath('%s')", collection, key); } private String toScript(String collection, String key, String defaultValue) { return String.format(Locale.ROOT, "return %s.getByPath('%s', %s)", collection, key, defaultValue); } private String numberFormat(String unparsable, String path, int i) { String format = "Could not parse [%s] as a int index into list at path [%s] and index [%d]"; return String.format(Locale.ROOT, format, unparsable, path, i); } private String missingValue(String path) { return String.format(Locale.ROOT, "Could not find value at path [%s]", path); } private void assertPathValue(String collection, String key, Object value) { assertEquals(value, exec(toScript(collection, key))); } private void assertPathDefaultValue(String collection, String key, Object value, String defaultValue) { assertEquals(value, exec(toScript(collection, key, defaultValue))); } private IllegalArgumentException assertPathError(String collection, String key, String message) { return assertPathError(toScript(collection, key), message); } private IllegalArgumentException assertPathError(String collection, String key, String defaultValue, String message) { return assertPathError(toScript(collection, key, defaultValue), message); } private IllegalArgumentException 
assertPathError(String script, String message) { IllegalArgumentException illegal = expectScriptThrows(IllegalArgumentException.class, () -> exec(script)); assertEquals(message, illegal.getMessage()); return illegal; } public void testOneLevelMap() { assertPathValue("['k0':'v0']", "k0", "v0"); } public void testOneLevelList() { assertPathValue("['a','b','c','d']", "2", "c"); } public void testTwoLevelMapList() { assertPathValue("['key0': ['a', 'b'], 'key1': ['c', 'd']]", "key1.0", "c"); } public void testMapDiffSizeList() { assertPathValue("['k0': ['a','b','c','d'], 'k1': ['q']]", "k0.3", "d"); } public void testBiMapList() { assertPathValue(mapMapList, "k0.k01.1.k011", k001Value); } public void testBiMapListObject() { assertPathValue(mapMapList, "k0.k01.1", k001Obj); } public void testListMap() { assertPathValue("[['key0': 'value0'], ['key1': 'value1']]", "1.key1", "value1"); } public void testTriList() { assertPathValue("[['a','b'],['c','d'],[['e','f'],['g','h']]]", "2.1.1", "h"); } public void testMapBiListObject() { assertPathValue(listMapListList, "2.m2.2", l2m2l1Obj); } public void testMapBiList() { assertPathValue(listMapListList, "2.m2.2.1", l2m2l1Index1); } public void testGetCollection() { List<String> k1List = new ArrayList<>(); k1List.add("c"); k1List.add("d"); assertPathValue("['key0': ['a', 'b'], 'key1': ['c', 'd']]", "key1", k1List); } public void testMapListDefaultOneLevel() { assertPathDefaultValue(mapList, "key2", "x", "'x'"); } public void testMapListDefaultTwoLevel() { assertPathDefaultValue(mapList, "key1.1", "d", "'x'"); } public void testBiMapListDefault() { assertPathDefaultValue(mapMapList, "k0.k01.1.k012", "foo", "'foo'"); } public void testBiMapListDefaultExists() { assertPathDefaultValue(mapMapList, "k0.k01.1.k011", "b", "'foo'"); } public void testBiMapListDefaultObjectExists() { assertPathDefaultValue(mapMapList, "k0.k01.1", k001Obj, "'foo'"); } public void testBiMapListDefaultObject() { assertPathDefaultValue(mapMapList, "k0.k01.9", k001Obj, k001MapStr); } public void testListMapBiListDefaultExists() { assertPathDefaultValue(listMapListList, "2.m2.2", l2m2l1Obj, "'foo'"); } public void testListMapBiListDefaultObject() { assertPathDefaultValue(listMapListList, "2.m2.9", l2m2l1Obj, l2m2l1Str); } public void testBiListBadIndex() { String path = "1.k0"; IllegalArgumentException err = assertPathError("[['a','b'],['c','d']]", path, numberFormat("k0", path, 1)); assertEquals(err.getCause().getClass(), NumberFormatException.class); } public void testBiMapListMissingLast() { String path = "k0.k01.1.k012"; assertPathError(mapMapList, path, missingValue(path)); } public void testBiMapListBadIndex() { String path = "k0.k01.k012"; IllegalArgumentException err = assertPathError(mapMapList, path, numberFormat("k012", path, 2)); assertEquals(err.getCause().getClass(), NumberFormatException.class); } public void testListMapBiListMissingObject() { String path = "2.m2.12"; assertPathError(listMapListList, path, missingValue(path)); } public void testListMapBiListBadIndexAtObject() { String path = "2.m2.a8"; IllegalArgumentException err = assertPathError(listMapListList, path, numberFormat("a8", path, 2)); assertEquals(err.getCause().getClass(), NumberFormatException.class); } public void testNonContainer() { assertPathError(mapMap, "a.b.c", "Non-container [java.lang.String] at [c], index [2] in path [a.b.c]"); } public void testMissingPath() { assertPathError(mapMap, "", "Missing path"); } public void testDoubleDot() { assertPathError(mapMap, "a..b", "Extra '.' 
in path [a..b] at index [1]"); } public void testTrailingDot() { assertPathError(mapMap, "a.b.", "Trailing '.' in path [a.b.]"); } public void testBiListDefaultBadIndex() { String path = "1.k0"; IllegalArgumentException err = assertPathError("[['a','b'],['c','d']]", path, "'foo'", numberFormat("k0", path, 1)); assertEquals(err.getCause().getClass(), NumberFormatException.class); } public void testBiMapListDefaultBadIndex() { String path = "k0.k01.k012"; IllegalArgumentException err = assertPathError(mapMapList, path, "'foo'", numberFormat("k012", path, 2)); assertEquals(err.getCause().getClass(), NumberFormatException.class); } public void testListMapBiListObjectDefaultBadIndex() { String path = "2.m2.a8"; IllegalArgumentException err = assertPathError(listMapListList, path, "'foo'", numberFormat("a8", path, 2)); assertEquals(err.getCause().getClass(), NumberFormatException.class); } public void testNonContainerDefaultBadIndex() { assertPathError(mapMap, "a.b.c", "'foo'", "Non-container [java.lang.String] at [c], index [2] in path [a.b.c]"); } public void testDoubleDotDefault() { assertPathError(mapMap, "a..b", "'foo'", "Extra '.' in path [a..b] at index [1]"); } public void testTrailingDotDefault() { assertPathError(mapMap, "a.b.", "'foo'", "Trailing '.' in path [a.b.]"); } }
3,594
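The behaviour these tests pin down is easy to restate outside Painless. A compact Python model of getByPath's semantics: dot-separated segments, numeric segments index into lists, and the default is used only when a key or index is genuinely missing, while malformed paths and non-containers still raise. Exception types and messages differ from the Java ones; this is a sketch of the contract, not the implementation.

from typing import Any

_MISSING = object()

def get_by_path(collection: Any, path: str, default: Any = _MISSING) -> Any:
    """Dotted-path lookup over nested dicts/lists, mirroring the behaviour tested above."""
    if not path:
        raise ValueError("Missing path")
    current = collection
    for index, segment in enumerate(path.split(".")):
        if segment == "":
            raise ValueError(f"Empty segment in path [{path}] at index [{index}]")
        try:
            if isinstance(current, list):
                current = current[int(segment)]  # a non-numeric segment raises even when a default exists
            elif isinstance(current, dict):
                current = current[segment]
            else:
                raise ValueError(f"Non-container at [{segment}], index [{index}] in path [{path}]")
        except (KeyError, IndexError):
            if default is not _MISSING:
                return default
            raise ValueError(f"Could not find value at path [{path}]")
    return current

assert get_by_path({"k0": "v0"}, "k0") == "v0"
assert get_by_path({"key0": ["a", "b"], "key1": ["c", "d"]}, "key1.0") == "c"
assert get_by_path({"key0": ["a", "b"], "key1": ["c", "d"]}, "key2", "x") == "x"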
357
package com.vmware.identity.rest.core.client.test.integration.util; import static com.vmware.identity.rest.core.client.URIFactory.buildURI; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.StringWriter; import java.net.URI; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; import java.util.TimeZone; import java.util.concurrent.TimeUnit; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; import javax.xml.soap.MessageFactory; import javax.xml.soap.SOAPBody; import javax.xml.soap.SOAPException; import javax.xml.soap.SOAPHeader; import javax.xml.soap.SOAPMessage; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.TransformerFactoryConfigurationError; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; import org.apache.commons.codec.binary.Base64; import org.apache.http.HttpEntity; import org.apache.http.HttpStatus; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.w3c.dom.Node; import com.vmware.identity.rest.core.client.BaseClient; import com.vmware.identity.rest.core.client.exceptions.ClientException; public class SAMLClient extends BaseClient { private static final String TOKEN_TENANT_URI_STRING = "/sts/STSService/%s"; private static final String XML_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"; private static final String RST_FILE = "/soap_request.xml"; private static final String CREATED_TAG = "wsu:Created"; private static final String EXPIRES_TAG = "wsu:Expires"; private static final String USERNAME_TAG = "wsse:Username"; private static final String PASSWORD_TAG = "<PASSWORD>"; private static final String SAML2_ASSERTION_TAG = "saml2:Assertion"; private static final String SOAP_ACTION_ISSUE = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue"; private static final String GET_TOKEN_METHOD = "GetSamlToken"; private static final String REQUEST_CONTENT_TYPE = "text/xml"; public SAMLClient(String host, HostnameVerifier verifier, SSLContext sslContext) { super(host, verifier, sslContext); } public String getAccessToken(String tenant, String username, String password) throws SOAPException, IOException, ClientException { InputStream is = SAMLClient.class.getResourceAsStream(RST_FILE); SOAPMessage soapRequest = MessageFactory.newInstance().createMessage(null, is); SOAPHeader header = soapRequest.getSOAPHeader(); SOAPBody body = soapRequest.getSOAPBody(); DateFormat dateFormat = new SimpleDateFormat(XML_DATE_FORMAT); dateFormat.setTimeZone(TimeZone.getTimeZone("GMT")); final long now = System.currentTimeMillis(); String createDate = dateFormat.format(new Date(now)); String expireDate = dateFormat.format(new Date(now + TimeUnit.SECONDS.toMillis(600))); Node node = header.getElementsByTagName(CREATED_TAG).item(0); node.setTextContent(createDate); node = header.getElementsByTagName(EXPIRES_TAG).item(0); node.setTextContent(expireDate); node = header.getElementsByTagName(USERNAME_TAG).item(0); node.setTextContent(username); node = header.getElementsByTagName(PASSWORD_TAG).item(0); node.setTextContent(password); node = body.getElementsByTagName(CREATED_TAG).item(0); node.setTextContent(createDate); node = body.getElementsByTagName(EXPIRES_TAG).item(0); 
node.setTextContent(expireDate); URI uri = buildURI(getHostRetriever(), TOKEN_TENANT_URI_STRING, tenant); HttpPost post = new HttpPost(uri); post.setHeader("Content-Type", REQUEST_CONTENT_TYPE); post.setHeader("SOAPAction", SOAP_ACTION_ISSUE); post.setHeader("Method", GET_TOKEN_METHOD); ByteArrayOutputStream out = new ByteArrayOutputStream(); soapRequest.writeTo(out); String messageStr = new String(out.toByteArray()); HttpEntity entity = new StringEntity(messageStr); post.setEntity(entity); String token; try (CloseableHttpResponse response = getClient().execute(post)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode == HttpStatus.SC_OK) { try { SOAPMessage soapResponse = MessageFactory.newInstance().createMessage(null, response.getEntity().getContent()); Node tokenNode = soapResponse.getSOAPBody().getElementsByTagName(SAML2_ASSERTION_TAG).item(0); StringWriter writer = new StringWriter(); Transformer transformer = TransformerFactory.newInstance().newTransformer(); transformer.transform(new DOMSource(tokenNode), new StreamResult(writer)); token = writer.toString(); } catch (IOException | TransformerFactoryConfigurationError | TransformerException e) { throw new IllegalStateException("An error occurred unmarshalling the response", e); } } else { String error = EntityUtils.toString(response.getEntity()); throw new IllegalArgumentException("An error (" + statusCode + ") occurred when retrieving the access token from STS. " + error); } } return Base64.encodeBase64String(token.getBytes()); } }
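getAccessToken above fills timestamps and credentials into a WS-Trust RST template, POSTs it to the /sts/STSService/{tenant} endpoint with the Issue SOAPAction, pulls the saml2:Assertion out of the response and Base64-encodes it. The Python sketch below follows the same flow under simplifying assumptions: the host is a placeholder, and the request template is assumed to carry {created}/{expires}/{username}/{password} placeholders instead of the wsu:/wsse: DOM elements edited in the Java code.

# Rough sketch of the token request flow above, using string templating instead
# of DOM edits. Host and template contents are assumptions, not taken from the
# original client; endpoint path, headers and the saml2:Assertion extraction
# follow the constants in the Java code.
import base64
from datetime import datetime, timedelta, timezone
from xml.etree import ElementTree as ET

import requests

STS_URL = "https://sso.example.com/sts/STSService/{tenant}"   # hypothetical host
SAML2_NS = "urn:oasis:names:tc:SAML:2.0:assertion"


def get_access_token(tenant, username, password, template="soap_request.xml"):
    now = datetime.now(timezone.utc)
    fmt = "%Y-%m-%dT%H:%M:%S.000Z"
    with open(template, encoding="utf-8") as fh:
        envelope = fh.read().format(
            created=now.strftime(fmt),
            expires=(now + timedelta(seconds=600)).strftime(fmt),
            username=username,
            password=password,
        )
    resp = requests.post(
        STS_URL.format(tenant=tenant),
        data=envelope.encode("utf-8"),
        headers={
            "Content-Type": "text/xml",
            "SOAPAction": "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue",
        },
    )
    resp.raise_for_status()
    # Grab the first saml2:Assertion element and return it Base64-encoded,
    # mirroring the final step of the Java method.
    assertion = ET.fromstring(resp.content).find(f".//{{{SAML2_NS}}}Assertion")
    return base64.b64encode(ET.tostring(assertion)).decode("ascii")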
2,145
14,425
<filename>hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Compressions.h /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef COMPRESSIONS_H_ #define COMPRESSIONS_H_ #include <string> #include <vector> #include "lib/Streams.h" namespace NativeTask { using std::vector; using std::string; class CompressStream : public FilterOutputStream { public: CompressStream(OutputStream * stream) : FilterOutputStream(stream) { } virtual ~CompressStream(); virtual void writeDirect(const void * buff, uint32_t length); virtual void finish() { flush(); } virtual void resetState() { } virtual uint64_t compressedBytesWritten() { return 0; } }; class DecompressStream : public FilterInputStream { public: DecompressStream(InputStream * stream) : FilterInputStream(stream) { } virtual ~DecompressStream(); virtual int32_t readDirect(void * buff, uint32_t length); virtual uint64_t compressedBytesRead() { return 0; } }; class Compressions { protected: class Codec { public: string name; string extension; Codec(const string & name, const string & extension) : name(name), extension(extension) { } }; static vector<Codec> SupportedCodecs; static void initCodecs(); public: static const Codec GzipCodec; static const Codec SnappyCodec; static const Codec Lz4Codec; public: static bool support(const string & codec); static const string getExtension(const string & codec); static const string getCodec(const string & extension); static const string getCodecByFile(const string & file); static CompressStream * getCompressionStream(const string & codec, OutputStream * stream, uint32_t bufferSizeHint); static DecompressStream * getDecompressionStream(const string & codec, InputStream * stream, uint32_t bufferSizeHint); }; } // namespace NativeTask #endif /* COMPRESSIONS_H_ */
819
1,127
// Copyright (C) 2018-2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include <vector> #include "low_precision_transformations/move_fake_quantize_transformation.hpp" #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; const std::vector<ngraph::element::Type> netPrecisions = { ngraph::element::f32, //ngraph::element::f16 }; const std::vector<ngraph::pass::low_precision::LayerTransformation::Params> trasformationParamValues = { LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParams().setUpdatePrecisions(true) }; namespace testValues1 { const std::vector<LayerTestsDefinitions::MoveFakeQuantizeTransformationParam> params = { // without operation { 3, "", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {2.55f}}, {}, {}, "Concatenation", "U8", 1, }, // with ReLU operation { 3, "relu", { 256ul, {}, { -12.7f }, { 12.7f }, { -12.7f }, { 12.7f }}, {}, {}, "Concatenation", "U8", 1 }, // Q/DQ { 3, "", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, { ngraph::element::u8 }, { { ngraph::element::f32 }, {}, { 0.01f } }, "Concatenation", "U8", 1 }, // Q/DQ with ReLU { 3, "relu", { 256ul, {}, {0.f}, {2.55f}, {0.f}, {255.f} }, { ngraph::element::u8 }, { { ngraph::element::f32 }, {}, { 0.01f } }, "Concatenation", "U8", 1 }, // multi-chanels { 3, "relu", { 256ul, {{1, 6, 1, 1}, {1, 6, 1, 1}, {1, 6, 1, 1}, {1, 6, 1, 1}}, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f, 2.55f / 4.f, 2.55f / 5.f, 2.55f / 6.f}, {-128.f, -128.f, -128.f, -128.f, -128.f, -128.f}, {127.f, 127.f, 127.f, 127.f, 127.f, 127.f} }, {}, {}, "Concatenation", "I8", 1 }, // Q/DQ with multi-channels multiply { 3, "", { 256ul, {{1, 6, 1, 1}, {1, 6, 1, 1}, {1, 6, 1, 1}, {1, 6, 1, 1}}, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f, 2.55f / 4.f, 2.55f / 5.f, 2.55f / 6.f}, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {255.f, 255.f / 2.f, 255.f / 3.f, 255.f / 4.f, 255.f / 5.f, 255.f / 6.f}, }, { ngraph::element::u8 }, { { ngraph::element::f32 }, {}, { {0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f}, ngraph::element::f32, {1, 6, 1, 1} }, }, "Concatenation", "U8", 1 }, // Q/DQ with multi-channels subtract { 3, "", { 256ul, {{1, 6, 1, 1}, {1, 6, 1, 1}, {1, 6, 1, 1}, {1, 6, 1, 1}}, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {2.55f, 2.55f / 2.f, 2.55f / 3.f, 2.55f / 4.f, 2.55f / 5.f, 2.55f / 6.f}, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}, {255.f, 255.f / 2.f, 255.f / 3.f, 255.f / 4.f, 255.f / 5.f, 255.f / 6.f}, }, { ngraph::element::u8 }, { { ngraph::element::f32 }, { {-127.f, -127.f / 2.f, -127.f / 3.f, -127.f / 4.f, -127.f / 5.f, -127.f / 6.f}, ngraph::element::f32, {1, 6, 1, 1} }, { 0.01f }, }, "Concatenation", "U8", 1 }, }; const std::vector<std::vector<ngraph::PartialShape>> shapes = { {{ 1, 1, 16, 16 }, { 1, 2, 16, 16 }, { 1, 3, 16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 2, 16, 16 }, { 4, 3, 16, 16 }} }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MoveFakeQuantizeTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(shapes), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn({false, true}), ::testing::ValuesIn(params)), MoveFakeQuantizeTransformation::getTestCaseName); } // namespace testValues1 namespace testValues2 { const std::vector<LayerTestsDefinitions::MoveFakeQuantizeTransformationParam> params = { // negative axis { 3, "", {256ul, {}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, {}, {}, "Concatenation", "FP32", -1 }, }; const std::vector<std::vector<ngraph::PartialShape>> shapes = { {{ 1, 1, 16, 16 }, { 1, 1, 
16, 16 }, { 1, 1, 16, 16 }}, {{ 4, 1, 16, 16 }, { 4, 1, 16, 16 }, { 4, 1, 16, 16 }} }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MoveFakeQuantizeTransformation, ::testing::Combine( ::testing::ValuesIn(netPrecisions), ::testing::ValuesIn(shapes), ::testing::Values(CommonTestUtils::DEVICE_CPU), ::testing::ValuesIn(trasformationParamValues), ::testing::ValuesIn({false}), ::testing::ValuesIn(params)), MoveFakeQuantizeTransformation::getTestCaseName); } // namespace testValues2
3,001
1,248
<filename>src/tests/plugins/banktransfer/test_api.py # # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 <NAME> and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # import copy import json from datetime import datetime, timedelta from unittest import mock import pytest from django.utils.timezone import now from django_scopes import scopes_disabled from pytz import UTC from pretix.base.models import ( Event, Item, Order, OrderPosition, Organizer, Quota, Team, User, ) from pretix.plugins.banktransfer.models import BankImportJob, BankTransaction @pytest.fixture def env(): o = Organizer.objects.create(name='Dummy', slug='dummy') event = Event.objects.create( organizer=o, name='Dummy', slug='dummy', date_from=now(), plugins='pretix.plugins.banktransfer' ) user = User.objects.create_user('<EMAIL>', 'dummy') t = Team.objects.create(organizer=event.organizer, can_view_orders=True, can_change_orders=True) t.members.add(user) t.limit_events.add(event) o1 = Order.objects.create( code='1Z3AS', event=event, status=Order.STATUS_PENDING, datetime=now(), expires=now() + timedelta(days=10), total=23 ) o2 = Order.objects.create( code='6789Z', event=event, status=Order.STATUS_CANCELED, datetime=now(), expires=now() + timedelta(days=10), total=23 ) quota = Quota.objects.create(name="Test", size=2, event=event) item1 = Item.objects.create(event=event, name="Ticket", default_price=23) quota.items.add(item1) OrderPosition.objects.create(order=o1, item=item1, variation=None, price=23) return event, user, o1, o2 RES_JOB = { 'event': 'dummy', 'id': 1, 'transactions': [ {'comment': '', 'message': '', 'payer': 'Foo', 'reference': '', 'checksum': '', 'iban': '', 'bic': '', 'amount': '0.00', 'date': 'unknown', 'state': 'error', 'order': None } ], 'created': '2017-06-27T09:13:35.785251Z', 'state': 'pending' } @pytest.mark.django_db def test_api_list(env, client): testtime = datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC) with mock.patch('django.utils.timezone.now') as mock_now: mock_now.return_value = testtime job = BankImportJob.objects.create(event=env[0], organizer=env[0].organizer) BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_ERROR, amount=0, date='unknown') res = copy.copy(RES_JOB) res['id'] = job.pk res['created'] = testtime.isoformat().replace('+00:00', 'Z') client.login(email='<EMAIL>', password='<PASSWORD>') r = json.loads( 
client.get('/api/v1/organizers/{}/bankimportjobs/'.format(env[0].organizer.slug)).content.decode('utf-8') ) assert r['results'] == [res] @pytest.mark.django_db def test_api_detail(env, client): testtime = datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC) with mock.patch('django.utils.timezone.now') as mock_now: mock_now.return_value = testtime job = BankImportJob.objects.create(event=env[0], organizer=env[0].organizer) BankTransaction.objects.create(event=env[0], import_job=job, payer='Foo', state=BankTransaction.STATE_ERROR, amount=0, date='unknown') res = copy.copy(RES_JOB) res['id'] = job.pk res['created'] = testtime.isoformat().replace('+00:00', 'Z') client.login(email='<EMAIL>', password='<PASSWORD>') r = json.loads( client.get( '/api/v1/organizers/{}/bankimportjobs/{}/'.format(env[0].organizer.slug, job.pk) ).content.decode('utf-8') ) assert r == res @pytest.mark.django_db(transaction=True) def test_api_create(env, client): client.login(email='<EMAIL>', password='<PASSWORD>') r = client.post( '/api/v1/organizers/{}/bankimportjobs/'.format(env[0].organizer.slug), json.dumps({ 'event': 'dummy', 'transactions': [ { 'payer': 'Foo', 'reference': 'DUMMY-1Z3AS', 'amount': '23.00', 'date': 'yesterday' # test bogus date format } ] }), content_type="application/json" ) assert r.status_code == 201 rdata = json.loads(r.content.decode('utf-8')) # This is only because we don't run celery in tests, otherwise it wouldn't be completed yet. assert rdata['state'] == 'completed' assert len(rdata['transactions']) == 1 assert rdata['transactions'][0]['checksum'] env[2].refresh_from_db() assert env[2].status == Order.STATUS_PAID @pytest.mark.django_db(transaction=True) def test_api_create_with_iban_bic(env, client): client.login(email='<EMAIL>', password='<PASSWORD>') r = client.post( '/api/v1/organizers/{}/bankimportjobs/'.format(env[0].organizer.slug), json.dumps({ 'event': 'dummy', 'transactions': [ { 'payer': 'Foo', 'reference': 'DUMMY-1Z3AS', 'amount': '23.00', 'iban': 'NL79RABO5373380466', 'bic': 'GENODEM1GLS', 'date': 'yesterday' # test bogus date format } ] }), content_type="application/json" ) assert r.status_code == 201 rdata = json.loads(r.content.decode('utf-8')) # This is only because we don't run celery in tests, otherwise it wouldn't be completed yet. assert rdata['state'] == 'completed' assert len(rdata['transactions']) == 1 assert rdata['transactions'][0]['checksum'] env[2].refresh_from_db() assert env[2].status == Order.STATUS_PAID with scopes_disabled(): assert env[2].payments.first().info_data['iban'] == 'NL79RABO5373380466'
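The tests above drive pretix's /api/v1/organizers/{organizer}/bankimportjobs/ endpoint with a JSON body containing an event slug and a list of transactions. The sketch below shows the same POST from a plain HTTP client; the host, organizer slug and token are placeholders, and the token-style Authorization header is an assumption about the deployment rather than something exercised by these tests (which use session login).

# Client-side sketch of creating a bank import job through the REST endpoint
# exercised above. Host, token and slugs are placeholders; the payload shape
# mirrors the test_api_create test case.
import requests

API = "https://pretix.example.com/api/v1"   # hypothetical instance
TOKEN = "replace-me"                        # API token (placeholder)


def create_bank_import_job(organizer, event, transactions):
    resp = requests.post(
        f"{API}/organizers/{organizer}/bankimportjobs/",
        json={"event": event, "transactions": transactions},
        headers={"Authorization": f"Token {TOKEN}"},
    )
    resp.raise_for_status()
    # The response carries a job 'state' plus per-transaction fields such as
    # 'checksum', as asserted in the tests above.
    return resp.json()


job = create_bank_import_job(
    "dummy", "dummy",
    [{"payer": "Foo", "reference": "DUMMY-1Z3AS", "amount": "23.00", "date": "yesterday"}],
)
print(job["state"])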
3,064
14,668
<filename>ash/public/cpp/nearby_share_delegate.h // Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ASH_PUBLIC_CPP_NEARBY_SHARE_DELEGATE_H_ #define ASH_PUBLIC_CPP_NEARBY_SHARE_DELEGATE_H_ #include "ash/public/cpp/ash_public_export.h" #include "base/observer_list_types.h" namespace base { class TimeTicks; } // namespace base namespace ash { // This delegate is a singleton used by the // NearbyShareVisibilityFeaturePodButton in //ash to communicate with the // NearbySharingService KeyedService in //chrome. class ASH_PUBLIC_EXPORT NearbyShareDelegate { public: virtual ~NearbyShareDelegate() = default; // Used by the pod button to determine whether it should be visible. virtual bool IsPodButtonVisible() = 0; // Gets the current high visibility state from the NearbySharingService. virtual bool IsHighVisibilityOn() = 0; // Returns true if EnableHighVisibility() has been called but // NearbyShareDelegate has not yet been informed that the request has // concluded. virtual bool IsEnableHighVisibilityRequestActive() const = 0; // If high visibility is on, returns the time when the delegate // will turn it off. May return any value if high visibility is off. virtual base::TimeTicks HighVisibilityShutoffTime() const = 0; // Request high visibility be turned on. If Nearby Share is disabled in prefs, // this will instead redirect the user to onboarding. virtual void EnableHighVisibility() = 0; // Request high visibility be turned off. virtual void DisableHighVisibility() = 0; // Open the settings page for Nearby Share, Used when the user clicks on the // label under the pod button. virtual void ShowNearbyShareSettings() const = 0; }; } // namespace ash #endif // ASH_PUBLIC_CPP_NEARBY_SHARE_DELEGATE_H_
556
663
import numpy as np
from skmultiflow.drift_detection import ADWIN


def demo():
    """ _test_adwin

    In this demo, an ADWIN object evaluates a sequence of numbers corresponding
    to 2 distributions. The ADWIN object indicates the indices where change is
    detected.

    The first half of the data is a sequence of randomly generated 0's and 1's.
    The second half of the data is a sequence of uniformly distributed random
    integers from 0 to 7.
    """
    adwin = ADWIN()
    size = 2000
    change_start = 999
    np.random.seed(1)
    data_stream = np.random.randint(2, size=size)
    data_stream[change_start:] = np.random.randint(8, size=size - change_start)

    for i in range(size):
        adwin.add_element(data_stream[i])
        if adwin.detected_change():
            print('Change has been detected in data: ' + str(data_stream[i])
                  + ' - of index: ' + str(i))


if __name__ == '__main__':
    demo()
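The demo above prints change indices as it streams values into the detector. A small reusable variant is sketched below; it relies only on the add_element/detected_change calls already used in the demo, with ADWIN's delta threshold exposed as a parameter (the delta keyword follows skmultiflow's documented constructor).

# Reusable variant of the demo: feed any 1-D sequence to ADWIN and collect the
# indices at which a change is flagged.
import numpy as np
from skmultiflow.drift_detection import ADWIN


def change_points(stream, delta=0.002):
    """Return the indices where ADWIN signals a change in `stream`."""
    adwin = ADWIN(delta=delta)
    hits = []
    for i, value in enumerate(stream):
        adwin.add_element(value)
        if adwin.detected_change():
            hits.append(i)
    return hits


if __name__ == '__main__':
    np.random.seed(1)
    stream = np.concatenate([np.random.randint(2, size=1000),
                             np.random.randint(8, size=1000)])
    # Detected indices should cluster shortly after the change point at 1000.
    print(change_points(stream))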
333
376
<gh_stars>100-1000 from .target_classification import LBHinge
20
24,206
<gh_stars>1000+ package com.alibaba.excel.metadata.property; import com.alibaba.excel.annotation.format.DateTimeFormat; import com.alibaba.excel.util.BooleanUtils; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.Setter; /** * Configuration from annotations * * @author <NAME> */ @Getter @Setter @EqualsAndHashCode public class DateTimeFormatProperty { private String format; private Boolean use1904windowing; public DateTimeFormatProperty(String format, Boolean use1904windowing) { this.format = format; this.use1904windowing = use1904windowing; } public static DateTimeFormatProperty build(DateTimeFormat dateTimeFormat) { if (dateTimeFormat == null) { return null; } return new DateTimeFormatProperty(dateTimeFormat.value(), BooleanUtils.isTrue(dateTimeFormat.use1904windowing().getBooleanValue())); } }
338
348
{"nom":"Boulleret","circ":"1ère circonscription","dpt":"Cher","inscrits":1163,"abs":622,"votants":541,"blancs":40,"nuls":10,"exp":491,"res":[{"nuance":"REM","nom":"<NAME>","voix":279},{"nuance":"LR","nom":"M. <NAME>","voix":212}]}
93
1,305
/* * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. * * * * * * * * * * * * * * * * * * * * */ package com.sun.corba.se.impl.protocol.giopmsgheaders; import com.sun.corba.se.spi.ior.ObjectKey; import com.sun.corba.se.spi.ior.iiop.GIOPVersion; import com.sun.corba.se.spi.orb.ORB; /** * This implements the GIOP 1.2 LocateRequest header. * * @author <NAME> 05/14/2000 */ public final class LocateRequestMessage_1_2 extends Message_1_2 implements LocateRequestMessage { // Instance variables private ORB orb = null; private ObjectKey objectKey = null; private TargetAddress target = null; // Constructors LocateRequestMessage_1_2(ORB orb) { this.orb = orb; } LocateRequestMessage_1_2(ORB orb, int _request_id, TargetAddress _target) { super(Message.GIOPBigMagic, GIOPVersion.V1_2, FLAG_NO_FRAG_BIG_ENDIAN, Message.GIOPLocateRequest, 0); this.orb = orb; request_id = _request_id; target = _target; } // Accessor methods (LocateRequestMessage interface) public int getRequestId() { return this.request_id; } public ObjectKey getObjectKey() { if (this.objectKey == null) { // this will raise a MARSHAL exception upon errors. this.objectKey = MessageBase.extractObjectKey(target, orb); } return this.objectKey; } // IO methods public void read(org.omg.CORBA.portable.InputStream istream) { super.read(istream); this.request_id = istream.read_ulong(); this.target = TargetAddressHelper.read(istream); getObjectKey(); // this does AddressingDisposition check } public void write(org.omg.CORBA.portable.OutputStream ostream) { super.write(ostream); ostream.write_ulong (this.request_id); nullCheck(this.target); TargetAddressHelper.write(ostream, this.target); } public void callback(MessageHandler handler) throws java.io.IOException { handler.handleInput(this); } } // class LocateRequestMessage_1_2
907
2,856
<gh_stars>1000+ # Copyright (C) 2011, 2018 by <NAME> (<EMAIL>) # # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. """Classes for the support of yn00. Yang and Nielsen 2000, estimating synonymous and nonsynonymous substitution rates in pairwise comparison of protein-coding DNA sequences. """ import os.path from ._paml import Paml from . import _parse_yn00 class Yn00Error(EnvironmentError): """yn00 failed. Run with verbose=True to view yn00's error message.""" class Yn00(Paml): """An interface to yn00, part of the PAML package.""" def __init__(self, alignment=None, working_dir=None, out_file=None): """Initialize the Yn00 instance. The user may optionally pass in strings specifying the locations of the input alignment, the working directory and the final output file. """ Paml.__init__(self, alignment, working_dir, out_file) self.ctl_file = "yn00.ctl" self._options = { "verbose": None, "icode": None, "weighting": None, "commonf3x4": None, "ndata": None, } def write_ctl_file(self): """Dynamically build a yn00 control file from the options. The control file is written to the location specified by the ctl_file property of the yn00 class. """ # Make sure all paths are relative to the working directory self._set_rel_paths() with open(self.ctl_file, "w") as ctl_handle: ctl_handle.write(f"seqfile = {self._rel_alignment}\n") ctl_handle.write(f"outfile = {self._rel_out_file}\n") for option in self._options.items(): if option[1] is None: # If an option has a value of None, there's no need # to write it in the control file; it's normally just # commented out. continue ctl_handle.write(f"{option[0]} = {option[1]}\n") def read_ctl_file(self, ctl_file): """Parse a control file and load the options into the yn00 instance.""" temp_options = {} if not os.path.isfile(ctl_file): raise FileNotFoundError(f"File not found: {ctl_file!r}") else: with open(ctl_file) as ctl_handle: for line in ctl_handle: line = line.strip() uncommented = line.split("*", 1)[0] if uncommented != "": if "=" not in uncommented: raise AttributeError( f"Malformed line in control file:\n{line!r}" ) (option, value) = uncommented.split("=") option = option.strip() value = value.strip() if option == "seqfile": self.alignment = value elif option == "outfile": self.out_file = value elif option not in self._options: raise KeyError(f"Invalid option: {option}") else: if "." in value or "e-" in value: try: converted_value = float(value) except ValueError: converted_value = value else: try: converted_value = int(value) except ValueError: converted_value = value temp_options[option] = converted_value for option in self._options: if option in temp_options: self._options[option] = temp_options[option] else: self._options[option] = None def run(self, ctl_file=None, verbose=False, command="yn00", parse=True): """Run yn00 using the current configuration. If parse is True then read and return the result, otherwise return None. """ Paml.run(self, ctl_file, verbose, command) if parse: return read(self.out_file) return None def read(results_file): """Parse a yn00 results file.""" results = {} if not os.path.exists(results_file): raise FileNotFoundError("Results file does not exist.") with open(results_file) as handle: lines = handle.readlines() if not lines: raise ValueError( "Empty results file. Did YN00 exit successfully? " "Run 'Yn00.run()' with 'verbose=True'." 
) for line_num, line in enumerate(lines): if "(A) Nei-Gojobori (1986) method" in line: ng86_start = line_num + 1 elif "(B) Yang & Nielsen (2000) method" in line: (results, sequences) = _parse_yn00.parse_ng86( lines[ng86_start:line_num], results ) yn00_start = line_num + 1 elif "(C) LWL85, LPB93 & LWLm methods" in line: results = _parse_yn00.parse_yn00( lines[yn00_start:line_num], results, sequences ) results = _parse_yn00.parse_others( lines[line_num + 1 :], results, sequences ) if not results: raise ValueError("Invalid results file.") return results
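A minimal driver for the wrapper above might look like the sketch below. It assumes a yn00 binary on PATH and a PHYLIP-format alignment; the file names are placeholders, and only the constructor arguments, the write_ctl_file/run methods and the module-level read function defined in this file are used.

# Hypothetical driver for the Yn00 wrapper defined above. Paths are placeholders
# and `yn00` must be installed for run() to succeed.
from Bio.Phylo.PAML.yn00 import Yn00, read

yn = Yn00(alignment="alignment.phylip",
          working_dir=".",
          out_file="yn00_results.out")
yn.write_ctl_file()             # materialise yn00.ctl from the current options
results = yn.run(verbose=True)  # runs the external binary, then parses the output
# `results` is a nested dictionary of pairwise substitution-rate statistics; the
# same structure can be recovered later with read("yn00_results.out").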
2,848
2,392
// Copyright (c) 2011 CNRS and LIRIS' Establishments (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org) // // $URL: https://github.com/CGAL/cgal/blob/v5.1/Linear_cell_complex/include/CGAL/Linear_cell_complex.h $ // $Id: Linear_cell_complex.h 0308d1a 2020-03-27T18:35:15+01:00 Guillaume Damiand // SPDX-License-Identifier: LGPL-3.0-or-later OR LicenseRef-Commercial // // Author(s) : <NAME> <<EMAIL>> // #ifndef CGAL_LINEAR_CELL_COMPLEX_H #define CGAL_LINEAR_CELL_COMPLEX_H 1 #include <CGAL/Linear_cell_complex_fwd.h> #include <CGAL/Linear_cell_complex_for_combinatorial_map.h> #include <CGAL/Linear_cell_complex_traits.h> #include <CGAL/Linear_cell_complex_min_items.h> #include <CGAL/Combinatorial_map.h> #include <CGAL/CMap_linear_cell_complex_storages.h> namespace CGAL { /** @file Linear_cell_complex.h * Definition of a linear cell complex, i.e. a combinatorial map with points * associated to all vertices. Deprecated class. */ #if !defined(CGAL_NO_DEPRECATED_CODE) template < unsigned int d_, unsigned int ambient_dim, class Traits_, class Items_, class Alloc_, template<unsigned int,class,class,class,class> class CMap, class Storage_ > class CGAL_DEPRECATED Linear_cell_complex: public Linear_cell_complex_for_combinatorial_map<d_, ambient_dim, Traits_, Items_, Alloc_, CMap, Storage_> {}; #endif } // namespace CGAL #endif // CGAL_LINEAR_CELL_COMPLEX_H // // EOF //
648
456
// SPDX-License-Identifier: BSD-3-Clause // Copyright (c) 2004-2020 <NAME> // All rights reserved. #pragma once #include <djvUI/Widget.h> namespace djv { namespace UI { //! Tab bar widget. class TabBar : public Widget { DJV_NON_COPYABLE(TabBar); protected: void _init(const std::shared_ptr<System::Context>&); TabBar(); public: ~TabBar() override; static std::shared_ptr<TabBar> create(const std::shared_ptr<System::Context>&); //! \name Tabs ///@{ size_t getTabCount() const; void setTabs(const std::vector<std::string>&); void clearTabs(); void setText(int, const std::string&); void setTooltip(int, const std::string&); ///@} //! \name Current Tab ///@{ int getCurrentTab() const; void setCurrentTab(int); void setCurrentTabCallback(const std::function<void(int)>&); ///@} //! \name Options ///@{ bool areTabsClosable() const; void setTabsClosable(bool); void setTabCloseCallback(const std::function<void(int)>&); size_t getTextElide() const; void setTextElide(size_t); ///@] float getHeightForWidth(float) const override; protected: void _preLayoutEvent(System::Event::PreLayout&) override; void _layoutEvent(System::Event::Layout&) override; private: DJV_PRIVATE(); }; } // namespace UI } // namespace djv
853
945
#include "vnl/vnl_vector_fixed.hxx" VNL_VECTOR_FIXED_INSTANTIATE(signed long long, 2); VNL_VECTOR_FIXED_INSTANTIATE(signed long long, 3); VNL_VECTOR_FIXED_INSTANTIATE(signed long long, 4); VNL_VECTOR_FIXED_INSTANTIATE(signed long long, 8);
106
308
<reponame>melancholy/dd-trace-py import socket from typing import Optional _hostname = None # type: Optional[str] def get_hostname(): # type: () -> str global _hostname if not _hostname: _hostname = socket.gethostname() return _hostname
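get_hostname above caches socket.gethostname() in a module-level variable so repeated calls skip the lookup. The standalone sketch below shows the same idiom expressed with functools.lru_cache; it is an illustration of the caching pattern, not part of dd-trace-py.

# The cached-hostname idiom above, written with functools.lru_cache instead of
# an explicit module-level global. Behaviour is equivalent for a process whose
# hostname does not change at runtime.
import functools
import socket


@functools.lru_cache(maxsize=1)
def get_hostname() -> str:
    return socket.gethostname()


# The first call performs the lookup; later calls return the cached value.
assert get_hostname() == get_hostname()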
100
2,082
<reponame>Madhavan-Prabakaran/HackerRank_solutions // github.com/RodneyShag /* * For your reference: * * DoublyLinkedListNode { * int data; * DoublyLinkedListNode next; * DoublyLinkedListNode prev; * } * */ // Time Complexity: O(n) // Space Complexity: O(1) DoublyLinkedListNode reverse(DoublyLinkedListNode head) { if (head == null || head.next == null) { return head; } DoublyLinkedListNode prev = null; DoublyLinkedListNode curr = head; DoublyLinkedListNode next = null; while (curr != null) { next = curr.next; curr.next = prev; curr.prev = next; prev = curr; curr = next; } return prev; } // Discuss on HackerRank: https://www.hackerrank.com/challenges/reverse-a-doubly-linked-list/forum/comments/255770
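The reversal above runs in O(n) time and O(1) extra space by swapping each node's next/prev pointers as it walks the list. The Python sketch below mirrors the same algorithm with a stand-in node class, purely for illustration.

# Python sketch of the doubly linked list reversal above. Node is a stand-in
# for HackerRank's DoublyLinkedListNode.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None
        self.prev = None


def reverse(head):
    prev = None
    curr = head
    while curr is not None:
        nxt = curr.next
        curr.next = prev      # flip both links, exactly as in the Java version
        curr.prev = nxt
        prev = curr
        curr = nxt
    return prev               # the old tail becomes the new head


# Build 1 <-> 2 <-> 3, reverse it, and walk the result.
a, b, c = Node(1), Node(2), Node(3)
a.next, b.prev, b.next, c.prev = b, a, c, b
head = reverse(a)
values = []
while head:
    values.append(head.data)
    head = head.next
assert values == [3, 2, 1]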
350
1,755
<reponame>txwhhny/vtk /*========================================================================= Program: Visualization Toolkit Module: vtkFlagpoleLabel.h Copyright (c) <NAME>, <NAME>, <NAME> All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ /** * @class vtkFlagpoleLabel * @brief Renders a flagpole (line) with a label at the top that faces the camera * * This class draws a line from the base to the top of the flagpole. It then * places a text annotation at the top, centered horizontally. The text is * always oriented with the flagpole but will rotate aroundthe flagpole to * face the camera. */ #ifndef vtkFlagpoleLabel_h #define vtkFlagpoleLabel_h #include "vtkActor.h" #include "vtkNew.h" // For.... vtkNew! #include "vtkRenderingCoreModule.h" // For export macro #include "vtkSmartPointer.h" // For.... vtkSmartPointer! class vtkActor; class vtkImageData; class vtkLineSource; class vtkPolyData; class vtkPolyDataMapper; class vtkRenderer; class vtkTextProperty; class vtkTextRenderer; class VTKRENDERINGCORE_EXPORT vtkFlagpoleLabel : public vtkActor { public: static vtkFlagpoleLabel* New(); vtkTypeMacro(vtkFlagpoleLabel, vtkActor); void PrintSelf(ostream& os, vtkIndent indent) override; /** * The UTF-8 encoded string to display. * @{ */ void SetInput(const char* in); vtkGetStringMacro(Input); /** @} */ /** * The vtkTextProperty object that controls the rendered text. * @{ */ void SetTextProperty(vtkTextProperty* tprop); vtkGetObjectMacro(TextProperty, vtkTextProperty); /** @} */ /** * Force the actor to render during the opaque or translucent pass. * @{ */ void SetForceOpaque(bool opaque) override; bool GetForceOpaque() override; void ForceOpaqueOn() override; void ForceOpaqueOff() override; void SetForceTranslucent(bool trans) override; bool GetForceTranslucent() override; void ForceTranslucentOn() override; void ForceTranslucentOff() override; /**@}*/ /** * Defers to internal actor. */ vtkTypeBool HasTranslucentPolygonalGeometry() override; /** * Check/update geometry/texture in opaque pass, since it only happens once. */ int RenderOpaqueGeometry(vtkViewport* vp) override; /** * Just render in translucent pass, since it can execute multiple times * (depth peeling, for instance). */ int RenderTranslucentPolygonalGeometry(vtkViewport* vp) override; void ReleaseGraphicsResources(vtkWindow* win) override; double* GetBounds() override; using Superclass::GetBounds; /** * Set/Get the world coordinate position of the base */ vtkGetVector3Macro(BasePosition, double); void SetBasePosition(double x, double y, double z); /** * Set/Get the world coordinate position of the top */ vtkGetVector3Macro(TopPosition, double); void SetTopPosition(double x, double y, double z); /** * Set/Get the size of the flag. 1.0 is the default size * which corresponds to a preset texels/window value. Adjust this * to increase or decrease the default size. 
*/ vtkGetMacro(FlagSize, double); vtkSetMacro(FlagSize, double); protected: vtkFlagpoleLabel(); ~vtkFlagpoleLabel() override; bool InputIsValid(); void UpdateInternals(vtkRenderer* ren); bool TextureIsStale(vtkRenderer* ren); void GenerateTexture(vtkRenderer* ren); bool QuadIsStale(vtkRenderer* ren); void GenerateQuad(vtkRenderer* ren); // Used by the opaque pass to tell the translucent pass not to render. void Invalidate(); bool IsValid(); // Used to sync the internal actor's state. void PreRender(); // Text specification: char* Input; vtkTextProperty* TextProperty; // Cached metadata to determine if things need rebuildin' int RenderedDPI; vtkTimeStamp InputMTime; // We cache this so we can recompute the bounds between renders, if needed. vtkSmartPointer<vtkRenderer> RenderedRenderer; // Rendering stuffies vtkNew<vtkTextRenderer> TextRenderer; vtkNew<vtkImageData> Image; vtkNew<vtkPolyData> Quad; vtkNew<vtkPolyDataMapper> QuadMapper; vtkNew<vtkActor> QuadActor; vtkNew<vtkPolyDataMapper> PoleMapper; vtkNew<vtkLineSource> LineSource; vtkNew<vtkActor> PoleActor; double TopPosition[3]; double BasePosition[3]; double FlagSize; private: vtkFlagpoleLabel(const vtkFlagpoleLabel&) = delete; void operator=(const vtkFlagpoleLabel&) = delete; }; #endif // vtkFlagpoleLabel_h
1,548
3,106
/** * (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. * This file is generated. Do not modify it manually! * @codegen-command : phps RepoSync intl_oss_fbt * @codegen-source : fbsource/xplat/intl/oss-fbt/packages/react-native-fbt/android/src/main/java/com/facebook/react/modules/FbtModule.java * @generated SignedSource<<c75dd5d57eb42750ed38a715e5031c00>> */ /* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ package com.facebook.react.modules; import android.util.Log; import androidx.annotation.NonNull; import com.facebook.react.bridge.ReactApplicationContext; import com.facebook.react.bridge.ReactContextBaseJavaModule; import com.facebook.react.bridge.ReactMethod; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import org.json.JSONException; import org.json.JSONObject; public class FbtModule extends ReactContextBaseJavaModule { public static final String NAME = "FbtModule"; private static final String RAW_FILE_NAME = "localizable"; private static final String RAW_RES_FOLDER_NAME = "raw"; private final ReactApplicationContext context; private Map<String, String> translationsCache; public FbtModule(ReactApplicationContext reactContext) { super(reactContext); this.context = reactContext; this.translationsCache = new HashMap<>(); readTranslationsFile(); } @Override @NonNull public String getName() { return NAME; } /** * Tries to get the string for the hash key from the cache. * * @param hash Key for the translation. * @return The translation for the key or empty string if not found. */ @ReactMethod(isBlockingSynchronousMethod = true) public String getString(String hash) { if (translationsCache.containsKey(hash)) { return translationsCache.get(hash); } // hash key not found. return ""; } @ReactMethod(isBlockingSynchronousMethod = true) public int getDictionarySize() { return translationsCache.size(); } /** 1- Reads the file from the "res/raw" folder. 2- Parses the json into a map. */ private void readTranslationsFile() { // Reads the file. String jsonString = null; try { jsonString = readLocalizedJSONFile(); } catch (IOException e) { Log.e(NAME, "Error reading from raw resources: " + e.getMessage()); } // Parses the json into a map. if (jsonString != null) { try { parseJsonToDict(jsonString); } catch (JSONException e) { Log.e(NAME, "Error parsing json file: " + e.getMessage()); } } } /** * Parses a JSON file with the format: { "key": "value" } * * <p>into a Map<String, String>. * * <p> * * <p>- An empty map is returned if no translations were found. - If there's a parsing error * getting a value, an empty value is added. - {@link #translationsCache} contains the results. * * @param jsonString The json-formatted string to parse. * @throws JSONException If the jsonString has an incorrect format. */ private void parseJsonToDict(String jsonString) throws JSONException { // reset cached dictionary translationsCache = new HashMap<>(); // parse the jsonString. JSONObject jsonObject = new JSONObject(jsonString); Iterator<?> keys = jsonObject.keys(); while (keys.hasNext()) { String key = (String) keys.next(); String value = ""; try { value = jsonObject.getString(key); } catch (JSONException e) { // Error parsing value. Keep going. 
Log.e(NAME, "Error parsing a field from json file: " + e.getMessage()); } translationsCache.put(key, value); } } /** * Reads text from an InputStream * * @return The text in the stream or null if an error occurs. */ private String readLocalizedJSONFile() throws IOException { int localizedJSONFileID = getLocalizedJSONFileID(); if (localizedJSONFileID == 0) { Log.w(NAME, "Translations file not found in raw folder"); return null; } // Takes the translations file from the raw folder InputStream inputStream = context.getResources().openRawResource(localizedJSONFileID); ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); byte[] buffer = new byte[1024]; int len; while ((len = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, len); } outputStream.close(); inputStream.close(); return outputStream.toString(); } /** * Gets the raw resource ID * * @return The resource ID or 0 if it doesn't exist. */ private int getLocalizedJSONFileID() { return context .getResources() .getIdentifier(RAW_FILE_NAME, RAW_RES_FOLDER_NAME, context.getPackageName()); } }
1,686
3,680
// // Copyright 2018 Pixar // // Licensed under the Apache License, Version 2.0 (the "Apache License") // with the following modification; you may not use this file except in // compliance with the Apache License and the following modification to it: // Section 6. Trademarks. is deleted and replaced with: // // 6. Trademarks. This License does not grant permission to use the trade // names, trademarks, service marks, or product names of the Licensor // and its affiliates, except as required to comply with Section 4(c) of // the License and to reproduce the content of the NOTICE file. // // You may obtain a copy of the Apache License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the Apache License with the above modification is // distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the Apache License for the specific // language governing permissions and limitations under the Apache License. // #include "pxr/pxr.h" #include "pxr/base/trace/trace.h" #include "pxr/base/trace/reporter.h" #include "pxr/base/trace/eventTree.h" #include "pxr/base/tf/stringUtils.h" #include <iostream> PXR_NAMESPACE_USING_DIRECTIVE static void TestMarkerMacro() { TRACE_MARKER("Static Marker A") TRACE_MARKER("Static Marker B") TRACE_MARKER("Static Marker C") TRACE_MARKER_DYNAMIC(TfStringPrintf("Dynamic Marker %s", "A")); TRACE_MARKER_DYNAMIC(TfStringPrintf("Dynamic Marker %s", "B")); TRACE_MARKER_DYNAMIC(TfStringPrintf("Dynamic Marker %s", "C")); } static TraceEventTree::MarkerValues GetTimeOfMarker(const std::string& MarkerName, const TraceEventTree::MarkerValuesMap& Markers) { TraceEventTree::MarkerValuesMap::const_iterator it = Markers.find(TfToken(MarkerName)); TF_AXIOM(it!= Markers.end()); return it->second; } int main(int argc, char* argv[]) { TraceCollector* collector = &TraceCollector::GetInstance(); TraceReporterPtr reporter = TraceReporter::GetGlobalReporter(); collector->SetEnabled(true); TestMarkerMacro(); collector->SetEnabled(false); reporter->ReportChromeTracing(std::cout); TraceEventTreeRefPtr timeline = reporter->GetEventTree(); TF_AXIOM(timeline); const TraceEventTree::MarkerValuesMap& Markers = timeline->GetMarkers(); // Test that the Markers are recorded in order TraceEvent::TimeStamp asTime = GetTimeOfMarker("Static Marker A", Markers)[0].first; TF_AXIOM(GetTimeOfMarker("Static Marker A", Markers).size() == 1); TraceEvent::TimeStamp bsTime = GetTimeOfMarker("Static Marker B", Markers)[0].first; TraceEvent::TimeStamp csTime = GetTimeOfMarker("Static Marker C", Markers)[0].first; TF_AXIOM(asTime < bsTime && bsTime < csTime); TraceEvent::TimeStamp adTime = GetTimeOfMarker("Dynamic Marker A", Markers)[0].first; TraceEvent::TimeStamp bdTime = GetTimeOfMarker("Dynamic Marker B", Markers)[0].first; TraceEvent::TimeStamp cdTime = GetTimeOfMarker("Dynamic Marker C", Markers)[0].first; TF_AXIOM(csTime < adTime && adTime < bdTime && bdTime < cdTime); // Run a second time to test merging collector->SetEnabled(true); TestMarkerMacro(); collector->SetEnabled(false); reporter->ReportChromeTracing(std::cout); TraceEventTreeRefPtr timeline2 = reporter->GetEventTree(); const TraceEventTree::MarkerValuesMap& Markers2 = timeline2->GetMarkers(); size_t numSA = GetTimeOfMarker("Static Marker A", Markers2).size(); TF_AXIOM(numSA == 2); size_t numSB = GetTimeOfMarker("Static Marker B", Markers2).size(); TF_AXIOM(numSB == 2); size_t numSC = 
GetTimeOfMarker("Static Marker C", Markers2).size(); TF_AXIOM(numSC == 2); size_t numDA = GetTimeOfMarker("Dynamic Marker A", Markers2).size(); TF_AXIOM(numDA == 2); size_t numDB = GetTimeOfMarker("Dynamic Marker B", Markers2).size(); TF_AXIOM(numDB == 2); size_t numDC = GetTimeOfMarker("Dynamic Marker C", Markers2).size(); TF_AXIOM(numDC == 2); // Test clearing reporter->ClearTree(); collector->SetEnabled(true); TestMarkerMacro(); collector->SetEnabled(false); reporter->ReportChromeTracing(std::cout); TraceEventTreeRefPtr timeline3 = reporter->GetEventTree(); const TraceEventTree::MarkerValuesMap& Markers3 = timeline3->GetMarkers(); numSA = GetTimeOfMarker("Static Marker A", Markers3).size(); TF_AXIOM(numSA == 1); numSB = GetTimeOfMarker("Static Marker B", Markers3).size(); TF_AXIOM(numSB == 1); numSC = GetTimeOfMarker("Static Marker C", Markers3).size(); TF_AXIOM(numSC == 1); numDA = GetTimeOfMarker("Dynamic Marker A", Markers3).size(); TF_AXIOM(numDA == 1); numDB = GetTimeOfMarker("Dynamic Marker B", Markers3).size(); TF_AXIOM(numDB == 1); numDC = GetTimeOfMarker("Dynamic Marker C", Markers3).size(); TF_AXIOM(numDC == 1); }
1,795
738
<gh_stars>100-1000 from django.http import HttpResponseNotAllowed def requires_post(func): """ Returns an HTTP 405 error if the request method isn't POST. """ def decorator(request, *args, **kwargs): if request.method == 'POST': return func(request, *args, **kwargs) return HttpResponseNotAllowed(['POST']) return decorator
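Typical use of requires_post is to wrap a function-based view so that anything other than POST is answered with 405, as in the hypothetical view below (Django's stock django.views.decorators.http.require_POST behaves the same way).

# Hypothetical view using the requires_post decorator defined above (assumed to
# be importable from the same module).
from django.http import JsonResponse


@requires_post
def create_item(request):
    # Only reached for POST; any other method gets HttpResponseNotAllowed(['POST']).
    return JsonResponse({"created": True})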
142
543
<reponame>fredells/riiablo<filename>core/src/main/java/com/riiablo/video/VideoPacket.java package com.riiablo.video; import com.riiablo.io.ByteInput; public class VideoPacket implements Runnable { final ByteInput in; VideoPacket(ByteInput in) { this.in = in; } @Override public void run() { } private static final int[][] bink_tree_bits = { { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, }, { 0x00, 0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F, }, { 0x00, 0x02, 0x01, 0x09, 0x05, 0x15, 0x0D, 0x1D, 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F, }, { 0x00, 0x02, 0x06, 0x01, 0x09, 0x05, 0x0D, 0x1D, 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F, }, { 0x00, 0x04, 0x02, 0x06, 0x01, 0x09, 0x05, 0x0D, 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F, }, { 0x00, 0x04, 0x02, 0x0A, 0x06, 0x0E, 0x01, 0x09, 0x05, 0x0D, 0x03, 0x0B, 0x07, 0x17, 0x0F, 0x1F, }, { 0x00, 0x02, 0x0A, 0x06, 0x0E, 0x01, 0x09, 0x05, 0x0D, 0x03, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F, }, { 0x00, 0x01, 0x05, 0x03, 0x13, 0x0B, 0x1B, 0x3B, 0x07, 0x27, 0x17, 0x37, 0x0F, 0x2F, 0x1F, 0x3F, }, { 0x00, 0x01, 0x03, 0x13, 0x0B, 0x2B, 0x1B, 0x3B, 0x07, 0x27, 0x17, 0x37, 0x0F, 0x2F, 0x1F, 0x3F, }, { 0x00, 0x01, 0x05, 0x0D, 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x27, 0x17, 0x37, 0x0F, 0x2F, 0x1F, 0x3F, }, { 0x00, 0x02, 0x01, 0x05, 0x0D, 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x37, 0x0F, 0x2F, 0x1F, 0x3F, }, { 0x00, 0x01, 0x09, 0x05, 0x0D, 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x37, 0x0F, 0x2F, 0x1F, 0x3F, }, { 0x00, 0x02, 0x01, 0x03, 0x13, 0x0B, 0x1B, 0x3B, 0x07, 0x27, 0x17, 0x37, 0x0F, 0x2F, 0x1F, 0x3F, }, { 0x00, 0x01, 0x05, 0x03, 0x07, 0x27, 0x17, 0x37, 0x0F, 0x4F, 0x2F, 0x6F, 0x1F, 0x5F, 0x3F, 0x7F, }, { 0x00, 0x01, 0x05, 0x03, 0x07, 0x17, 0x37, 0x77, 0x0F, 0x4F, 0x2F, 0x6F, 0x1F, 0x5F, 0x3F, 0x7F, }, { 0x00, 0x02, 0x01, 0x05, 0x03, 0x07, 0x27, 0x17, 0x37, 0x0F, 0x2F, 0x6F, 0x1F, 0x5F, 0x3F, 0x7F, }, }; private static final int[][] bink_tree_lens = { { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, { 1, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }, { 2, 2, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }, { 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }, { 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5 }, { 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5 }, { 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5 }, { 1, 3, 3, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, { 1, 2, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, { 1, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6 }, { 2, 2, 3, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6 }, { 1, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6 }, { 2, 2, 2, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, { 1, 3, 3, 3, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7 }, { 1, 3, 3, 3, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }, { 2, 2, 3, 3, 3, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7 }, }; private static final int[][] bink_patterns = { { 0x00, 0x08, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38, 0x39, 0x31, 0x29, 0x21, 0x19, 0x11, 0x09, 0x01, 0x02, 0x0A, 0x12, 0x1A, 0x22, 0x2A, 0x32, 0x3A, 0x3B, 0x33, 0x2B, 0x23, 0x1B, 0x13, 0x0B, 0x03, 0x04, 0x0C, 0x14, 0x1C, 0x24, 0x2C, 0x34, 0x3C, 0x3D, 0x35, 0x2D, 0x25, 0x1D, 0x15, 0x0D, 0x05, 0x06, 0x0E, 0x16, 0x1E, 0x26, 0x2E, 0x36, 0x3E, 0x3F, 0x37, 0x2F, 0x27, 0x1F, 0x17, 0x0F, 0x07, }, { 0x3B, 0x3A, 0x39, 0x38, 0x30, 0x31, 0x32, 0x33, 0x2B, 0x2A, 0x29, 0x28, 0x20, 0x21, 0x22, 0x23, 0x1B, 0x1A, 0x19, 0x18, 0x10, 0x11, 0x12, 0x13, 0x0B, 0x0A, 0x09, 0x08, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x0F, 0x0E, 0x0D, 0x0C, 0x14, 0x15, 0x16, 0x17, 0x1F, 
0x1E, 0x1D, 0x1C, 0x24, 0x25, 0x26, 0x27, 0x2F, 0x2E, 0x2D, 0x2C, 0x34, 0x35, 0x36, 0x37, 0x3F, 0x3E, 0x3D, 0x3C, }, { 0x19, 0x11, 0x12, 0x1A, 0x1B, 0x13, 0x0B, 0x03, 0x02, 0x0A, 0x09, 0x01, 0x00, 0x08, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38, 0x39, 0x31, 0x29, 0x2A, 0x32, 0x3A, 0x3B, 0x33, 0x2B, 0x23, 0x22, 0x21, 0x1D, 0x15, 0x16, 0x1E, 0x1F, 0x17, 0x0F, 0x07, 0x06, 0x0E, 0x0D, 0x05, 0x04, 0x0C, 0x14, 0x1C, 0x24, 0x2C, 0x34, 0x3C, 0x3D, 0x35, 0x2D, 0x2E, 0x36, 0x3E, 0x3F, 0x37, 0x2F, 0x27, 0x26, 0x25, }, { 0x03, 0x0B, 0x02, 0x0A, 0x01, 0x09, 0x00, 0x08, 0x10, 0x18, 0x11, 0x19, 0x12, 0x1A, 0x13, 0x1B, 0x23, 0x2B, 0x22, 0x2A, 0x21, 0x29, 0x20, 0x28, 0x30, 0x38, 0x31, 0x39, 0x32, 0x3A, 0x33, 0x3B, 0x3C, 0x34, 0x3D, 0x35, 0x3E, 0x36, 0x3F, 0x37, 0x2F, 0x27, 0x2E, 0x26, 0x2D, 0x25, 0x2C, 0x24, 0x1C, 0x14, 0x1D, 0x15, 0x1E, 0x16, 0x1F, 0x17, 0x0F, 0x07, 0x0E, 0x06, 0x0D, 0x05, 0x0C, 0x04, }, { 0x18, 0x19, 0x10, 0x11, 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, 0x0A, 0x0B, 0x12, 0x13, 0x1A, 0x1B, 0x1C, 0x1D, 0x14, 0x15, 0x0C, 0x0D, 0x04, 0x05, 0x06, 0x07, 0x0E, 0x0F, 0x16, 0x17, 0x1E, 0x1F, 0x27, 0x26, 0x2F, 0x2E, 0x37, 0x36, 0x3F, 0x3E, 0x3D, 0x3C, 0x35, 0x34, 0x2D, 0x2C, 0x25, 0x24, 0x23, 0x22, 0x2B, 0x2A, 0x33, 0x32, 0x3B, 0x3A, 0x39, 0x38, 0x31, 0x30, 0x29, 0x28, 0x21, 0x20, }, { 0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B, 0x20, 0x21, 0x22, 0x23, 0x28, 0x29, 0x2A, 0x2B, 0x30, 0x31, 0x32, 0x33, 0x38, 0x39, 0x3A, 0x3B, 0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F, 0x24, 0x25, 0x26, 0x27, 0x2C, 0x2D, 0x2E, 0x2F, 0x34, 0x35, 0x36, 0x37, 0x3C, 0x3D, 0x3E, 0x3F, }, { 0x06, 0x07, 0x0F, 0x0E, 0x0D, 0x05, 0x0C, 0x04, 0x03, 0x0B, 0x02, 0x0A, 0x09, 0x01, 0x00, 0x08, 0x10, 0x18, 0x11, 0x19, 0x12, 0x1A, 0x13, 0x1B, 0x14, 0x1C, 0x15, 0x1D, 0x16, 0x1E, 0x17, 0x1F, 0x27, 0x2F, 0x26, 0x2E, 0x25, 0x2D, 0x24, 0x2C, 0x23, 0x2B, 0x22, 0x2A, 0x21, 0x29, 0x20, 0x28, 0x31, 0x30, 0x38, 0x39, 0x3A, 0x32, 0x3B, 0x33, 0x3C, 0x34, 0x3D, 0x35, 0x36, 0x37, 0x3F, 0x3E, }, { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, }, { 0x00, 0x08, 0x09, 0x01, 0x02, 0x03, 0x0B, 0x0A, 0x12, 0x13, 0x1B, 0x1A, 0x19, 0x11, 0x10, 0x18, 0x20, 0x28, 0x29, 0x21, 0x22, 0x23, 0x2B, 0x2A, 0x32, 0x31, 0x30, 0x38, 0x39, 0x3A, 0x3B, 0x33, 0x34, 0x3C, 0x3D, 0x3E, 0x3F, 0x37, 0x36, 0x35, 0x2D, 0x2C, 0x24, 0x25, 0x26, 0x2E, 0x2F, 0x27, 0x1F, 0x17, 0x16, 0x1E, 0x1D, 0x1C, 0x14, 0x15, 0x0D, 0x0C, 0x04, 0x05, 0x06, 0x0E, 0x0F, 0x07, }, { 0x18, 0x19, 0x10, 0x11, 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, 0x0A, 0x0B, 0x12, 0x13, 0x1A, 0x1B, 0x1C, 0x1D, 0x14, 0x15, 0x0C, 0x0D, 0x04, 0x05, 0x06, 0x07, 0x0E, 0x0F, 0x16, 0x17, 0x1E, 0x1F, 0x26, 0x27, 0x2E, 0x2F, 0x36, 0x37, 0x3E, 0x3F, 0x3C, 0x3D, 0x34, 0x35, 0x2C, 0x2D, 0x24, 0x25, 0x22, 0x23, 0x2A, 0x2B, 0x32, 0x33, 0x3A, 0x3B, 0x38, 0x39, 0x30, 0x31, 0x28, 0x29, 0x20, 0x21, }, { 0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B, 0x13, 0x1B, 0x12, 0x1A, 0x11, 0x19, 0x10, 0x18, 0x20, 0x28, 0x21, 0x29, 0x22, 0x2A, 0x23, 0x2B, 0x33, 0x3B, 0x32, 0x3A, 0x31, 0x39, 0x30, 0x38, 0x3C, 0x34, 0x3D, 0x35, 0x3E, 0x36, 0x3F, 0x37, 0x2F, 0x27, 0x2E, 0x26, 0x2D, 0x25, 0x2C, 0x24, 0x1F, 0x17, 0x1E, 0x16, 0x1D, 0x15, 
0x1C, 0x14, 0x0C, 0x04, 0x0D, 0x05, 0x0E, 0x06, 0x0F, 0x07, }, { 0x00, 0x08, 0x10, 0x18, 0x19, 0x1A, 0x1B, 0x13, 0x0B, 0x03, 0x02, 0x01, 0x09, 0x11, 0x12, 0x0A, 0x04, 0x0C, 0x14, 0x1C, 0x1D, 0x1E, 0x1F, 0x17, 0x0F, 0x07, 0x06, 0x05, 0x0D, 0x15, 0x16, 0x0E, 0x24, 0x2C, 0x34, 0x3C, 0x3D, 0x3E, 0x3F, 0x37, 0x2F, 0x27, 0x26, 0x25, 0x2D, 0x35, 0x36, 0x2E, 0x20, 0x28, 0x30, 0x38, 0x39, 0x3A, 0x3B, 0x33, 0x2B, 0x23, 0x22, 0x21, 0x29, 0x31, 0x32, 0x2A, }, { 0x00, 0x08, 0x09, 0x01, 0x02, 0x03, 0x0B, 0x0A, 0x13, 0x1B, 0x1A, 0x12, 0x11, 0x10, 0x18, 0x19, 0x21, 0x20, 0x28, 0x29, 0x2A, 0x22, 0x23, 0x2B, 0x33, 0x3B, 0x3A, 0x32, 0x31, 0x39, 0x38, 0x30, 0x34, 0x3C, 0x3D, 0x35, 0x36, 0x3E, 0x3F, 0x37, 0x2F, 0x27, 0x26, 0x2E, 0x2D, 0x2C, 0x24, 0x25, 0x1D, 0x1C, 0x14, 0x15, 0x16, 0x1E, 0x1F, 0x17, 0x0E, 0x0F, 0x07, 0x06, 0x05, 0x0D, 0x0C, 0x04, }, { 0x18, 0x10, 0x08, 0x00, 0x01, 0x02, 0x03, 0x0B, 0x13, 0x1B, 0x1A, 0x19, 0x11, 0x0A, 0x09, 0x12, 0x1C, 0x14, 0x0C, 0x04, 0x05, 0x06, 0x07, 0x0F, 0x17, 0x1F, 0x1E, 0x1D, 0x15, 0x0E, 0x0D, 0x16, 0x3C, 0x34, 0x2C, 0x24, 0x25, 0x26, 0x27, 0x2F, 0x37, 0x3F, 0x3E, 0x3D, 0x35, 0x2E, 0x2D, 0x36, 0x38, 0x30, 0x28, 0x20, 0x21, 0x22, 0x23, 0x2B, 0x33, 0x3B, 0x3A, 0x39, 0x31, 0x2A, 0x29, 0x32, }, { 0x00, 0x08, 0x09, 0x01, 0x02, 0x0A, 0x12, 0x11, 0x10, 0x18, 0x19, 0x1A, 0x1B, 0x13, 0x0B, 0x03, 0x07, 0x06, 0x0E, 0x0F, 0x17, 0x16, 0x15, 0x0D, 0x05, 0x04, 0x0C, 0x14, 0x1C, 0x1D, 0x1E, 0x1F, 0x3F, 0x3E, 0x36, 0x37, 0x2F, 0x2E, 0x2D, 0x35, 0x3D, 0x3C, 0x34, 0x2C, 0x24, 0x25, 0x26, 0x27, 0x38, 0x30, 0x31, 0x39, 0x3A, 0x32, 0x2A, 0x29, 0x28, 0x20, 0x21, 0x22, 0x23, 0x2B, 0x33, 0x3B, }, { 0x00, 0x01, 0x08, 0x09, 0x10, 0x11, 0x18, 0x19, 0x20, 0x21, 0x28, 0x29, 0x30, 0x31, 0x38, 0x39, 0x3A, 0x3B, 0x32, 0x33, 0x2A, 0x2B, 0x22, 0x23, 0x1A, 0x1B, 0x12, 0x13, 0x0A, 0x0B, 0x02, 0x03, 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x3C, 0x3D, 0x3E, 0x3F, 0x36, 0x37, 0x2E, 0x2F, 0x26, 0x27, 0x1E, 0x1F, 0x16, 0x17, 0x0E, 0x0F, 0x06, 0x07, } }; }
8,463
1,907
<gh_stars>1000+ // // This source file is part of appleseed. // Visit https://appleseedhq.net/ for additional information and resources. // // This software is released under the MIT license. // // Copyright (c) 2010-2013 <NAME>, Jupiter Jazz Limited // Copyright (c) 2014-2018 <NAME>, The appleseedhq Organization // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // // appleseed.foundation headers. #include "foundation/math/permutation.h" #include "foundation/math/rng/mersennetwister.h" #include "foundation/utility/test.h" // Standard headers. #include <cstddef> #include <cstdlib> #include <cstring> using namespace foundation; TEST_SUITE(Foundation_Math_Permutation) { TEST_CASE(TestIsPermutationOnValidPermutation1) { const size_t Size = 5; const size_t Permutation[Size] = { 0, 1, 2, 3, 4 }; EXPECT_TRUE(is_permutation(Size, Permutation)); } TEST_CASE(TestIsPermutationOnValidPermutation2) { const size_t Size = 5; const size_t Permutation[Size] = { 3, 2, 4, 0, 1 }; EXPECT_TRUE(is_permutation(Size, Permutation)); } TEST_CASE(TestIsPermutationOnValidPermutation3) { const size_t Size = 5; const size_t Permutation[Size] = { 4, 3, 2, 1, 0 }; EXPECT_TRUE(is_permutation(Size, Permutation)); } TEST_CASE(TestIsPermutationOnInvalidPermutation1) { const size_t Size = 5; const size_t Permutation[Size] = { 0, 1, 2, 3, 3 }; EXPECT_FALSE(is_permutation(Size, Permutation)); } TEST_CASE(TestIsPermutationOnInvalidPermutation2) { const size_t Size = 5; const size_t Permutation[Size] = { 0, 1, 2, 3, 5 }; EXPECT_FALSE(is_permutation(Size, Permutation)); } TEST_CASE(TestIdentityPermutation) { const size_t Size = 5; const size_t Expected[Size] = { 0, 1, 2, 3, 4 }; size_t perm[Size]; identity_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestRandomPermutation) { const size_t Size = 5; size_t perm[Size]; MersenneTwister rng; random_permutation(Size, perm, rng); EXPECT_TRUE(is_permutation(Size, perm)); } TEST_CASE(TestReverseQMCPermutationSize1) { const size_t Size = 1; const size_t Expected[Size] = { 0 }; size_t perm[Size]; reverse_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestReverseQMCPermutationSize5) { const size_t Size = 5; const size_t Expected[Size] = { 0, 4, 3, 2, 1 }; size_t perm[Size]; reverse_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestFaureQMCPermutationSize2) { const size_t Size = 2; const size_t Expected[Size] = { 0, 1 }; size_t perm[Size]; faure_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } 
TEST_CASE(TestFaureQMCPermutationSize3) { const size_t Size = 3; const size_t Expected[Size] = { 0, 1, 2 }; size_t perm[Size]; faure_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestFaureQMCPermutationSize4) { const size_t Size = 4; const size_t Expected[Size] = { 0, 2, 1, 3 }; size_t perm[Size]; faure_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestFaureQMCPermutationSize5) { const size_t Size = 5; const size_t Expected[Size] = { 0, 3, 2, 1, 4 }; size_t perm[Size]; faure_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestFaureQMCPermutationSize6) { const size_t Size = 6; const size_t Expected[Size] = { 0, 2, 4, 1, 3, 5 }; size_t perm[Size]; faure_qmc_permutation(Size, perm); EXPECT_ARRAY_EQ(Expected, perm); } TEST_CASE(TestSmallItemReorder) { const size_t Size = 10; const size_t Order[Size] = { 1, 3, 5, 2, 7, 6, 0, 4, 9, 8 }; size_t items[Size] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; size_t temp[Size]; small_item_reorder(items, temp, Order, Size); EXPECT_ARRAY_EQ(Order, items); } TEST_CASE(TestLargeItemReorderFirstVariant) { const size_t Size = 10; const size_t Order[Size] = { 1, 3, 5, 2, 7, 6, 0, 4, 9, 8 }; size_t items[Size] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; size_t temp[Size]; large_item_reorder(items, temp, Order, Size); EXPECT_ARRAY_EQ(Order, items); } TEST_CASE(TestLargeItemReorderSecondVariant) { const size_t Size = 10; const size_t Order[Size] = { 1, 3, 5, 2, 7, 6, 0, 4, 9, 8 }; size_t items[Size] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; size_t order[Size]; memcpy(order, Order, sizeof(Order)); large_item_reorder(items, order, Size); EXPECT_ARRAY_EQ(Order, items); } }
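The reverse_qmc_permutation expectations above follow a simple pattern: index 0 maps to itself and the remaining indices appear in descending order. The Python sketch below reproduces that construction and the is_permutation predicate the tests rely on; it mirrors the test vectors, not appleseed's internal implementation.

# Sketch matching the reverse_qmc_permutation expectations in the tests above,
# plus the is_permutation check they depend on.
def is_permutation(values):
    n = len(values)
    return sorted(values) == list(range(n))


def reverse_qmc_permutation(n):
    # Identity at index 0, then the remaining indices in descending order.
    return [0] + [n - i for i in range(1, n)]


assert reverse_qmc_permutation(1) == [0]
assert reverse_qmc_permutation(5) == [0, 4, 3, 2, 1]
assert is_permutation(reverse_qmc_permutation(5))
assert not is_permutation([0, 1, 2, 3, 3])   # duplicate entry, as in the tests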
2,686
878
<reponame>lfkdsk/JustWeEngine
package com.lfk.justweengine.utils.database;

import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.SQLException;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import android.util.Log;

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;

/**
 * Created by liufengkai on 16/3/18.
 */
public class DataBase {
    // Database metadata (table name, columns, CREATE statement)
    private DataBaseMessage mDBMessage;
    // SQLiteOpenHelper that creates/upgrades the database
    private DataBaseHelper mDBHelper;
    // The underlying SQLite database
    private SQLiteDatabase mDB;

    public DataBase() {
    }

    public DataBase(DataBaseMessage mDBMessage) {
        this.mDBMessage = mDBMessage;
    }

    // Entry point: build the metadata from an annotated class
    public static DataBase initAndOpen(String name, Class<?> clazz) {
        DataBase dataBase = new DataBase();
        dataBase.mDBMessage = dataBase.getCreateSQL(clazz);
        dataBase.mDBMessage.SQL_NAME = name;
        return dataBase;
    }

    public boolean open(Context context) {
        try {
            mDBHelper = new DataBaseHelper(context, mDBMessage.SQL_NAME, 1);
            mDB = mDBHelper.getWritableDatabase();
        } catch (SQLException e) {
            e.printStackTrace();
            return false;
        }
        return true;
    }

    public void close() {
        mDB.close();
        mDBHelper.close();
    }

    /**
     * Size.
     *
     * @return number of rows stored in the table
     */
    public int size() {
        int size = 0;
        Cursor mCursor = mDB.query(mDBMessage.TABLE_NAME,
                new String[]{mDBMessage.PRIMARY_KEY},
                null, null, null, null, null, null);
        if (mCursor != null) {
            size = mCursor.getCount();
            mCursor.close();
        }
        return size;
    }

    /**
     * Insert a row.
     *
     * @param node values for the non-generated columns, in declaration order
     * @return true on success
     */
    public boolean insert(Node node) {
        Log.e("node", node.toString());
        ContentValues values = new ContentValues();
        for (int i = 0; i < mDBMessage.LABEL_NAME.size(); i++) {
            if (node.arrayList.get(i) instanceof Integer)
                values.put(mDBMessage.LABEL_NAME.get(i), (Integer) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof String)
                values.put(mDBMessage.LABEL_NAME.get(i), (String) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof Float)
                values.put(mDBMessage.LABEL_NAME.get(i), (Float) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof Long)
                values.put(mDBMessage.LABEL_NAME.get(i), (Long) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof Boolean)
                values.put(mDBMessage.LABEL_NAME.get(i), (Boolean) node.arrayList.get(i));
        }
        node.key = mDB.insert(mDBMessage.TABLE_NAME, null, values);
        if (node.key == -1) {
            Log.e("DATABASE", "db insert fail!");
            return false;
        }
        return true;
    }

    /**
     * Update a row identified by its primary key.
     *
     * @param node the row to update (must already have a key)
     * @return true on success
     */
    public boolean update(Node node) {
        if (node.key == -1) {
            return false;
        }
        ContentValues values = new ContentValues();
        for (int i = 0; i < mDBMessage.LABEL_NAME.size(); i++) {
            if (node.arrayList.get(i) instanceof Integer)
                values.put(mDBMessage.LABEL_NAME.get(i), (Integer) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof String)
                values.put(mDBMessage.LABEL_NAME.get(i), (String) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof Float)
                values.put(mDBMessage.LABEL_NAME.get(i), (Float) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof Long)
                values.put(mDBMessage.LABEL_NAME.get(i), (Long) node.arrayList.get(i));
            else if (node.arrayList.get(i) instanceof Boolean)
                values.put(mDBMessage.LABEL_NAME.get(i), (Boolean) node.arrayList.get(i));
        }
        String condition = mDBMessage.PRIMARY_KEY + "=" + "\'" + node.key + "\'";
        return update(values, condition, null);
    }

    protected boolean update(ContentValues values, String whereClause, String[] whereArgs) {
        int rows = mDB.update(mDBMessage.TABLE_NAME, values, whereClause, whereArgs);
        if (rows <= 0) {
            Log.d("DATABASE", "db update fail!");
            return false;
        }
        return true;
    }

    /**
     * Delete the row at the given position.
     *
     * @param position row position (newest first)
     * @return true on success
     */
    public boolean delete(int position) {
        long key = getKey(position, null);
        if (key == -1) {
            return false;
        }
        String condition = mDBMessage.PRIMARY_KEY + "=" + "\'" + key + "\'";
        return delete(condition, null);
    }

    protected boolean delete(String whereClause, String[] whereArgs) {
        int rows = mDB.delete(mDBMessage.TABLE_NAME, whereClause, whereArgs);
        if (rows <= 0) {
            Log.e("DATABASE", "db delete fail!");
            return false;
        }
        return true;
    }

    public boolean clear() {
        return delete(null, null);
    }

    /**
     * Get overloads.
     *
     * @param position row position
     * @return matching rows, or null if none
     */
    public List<Node> get(int position) {
        return get(position, null);
    }

    public List<Node> get(long id) {
        String condition = mDBMessage.PRIMARY_KEY + "=" + "\'" + id + "\'";
        List<Node> notes = query(condition);
        if (notes.isEmpty()) {
            return null;
        }
        return notes;
    }

    public List<Node> get(int position, String condition) {
        Cursor cursor = mDB.query(mDBMessage.TABLE_NAME, null, condition,
                null, null, null, mDBMessage.PRIMARY_KEY + " DESC", null);
        List<Node> notes = extract(position, cursor);
        if (notes.isEmpty()) {
            return null;
        }
        return notes;
    }

    public List<Node> query() {
        Cursor cursor = mDB.query(mDBMessage.TABLE_NAME, null, null,
                null, null, null, mDBMessage.PRIMARY_KEY + " DESC", null);
        return extract(0, cursor);
    }

    public List<Node> query(String condition) {
        Cursor cursor = mDB.query(mDBMessage.TABLE_NAME, null, condition,
                null, null, null, mDBMessage.PRIMARY_KEY + " DESC", null);
        return extract(0, cursor);
    }

    public List<Node> query(int offset, int limit) {
        return query(null, offset, limit);
    }

    public List<Node> query(String condition, int offset, int limit) {
        Cursor cursor = mDB.query(mDBMessage.TABLE_NAME, null, condition,
                null, null, null, mDBMessage.PRIMARY_KEY + " DESC", offset + "," + limit);
        return extract(0, cursor);
    }

    /**
     * Read rows from a cursor, starting at the given offset.
     *
     * @param offset starting position inside the cursor
     * @param cursor result cursor
     * @return the extracted rows
     */
    protected List<Node> extract(int offset, Cursor cursor) {
        List<Node> notes = new ArrayList<>();
        if (cursor == null || cursor.getCount() <= offset) {
            return notes;
        }
        cursor.moveToFirst();
        cursor.moveToPosition(offset);
        do {
            Node note = new Node();
            note.key = cursor.getLong(cursor.getColumnIndex(mDBMessage.PRIMARY_KEY));
            for (int i = 0; i < mDBMessage.LABEL_NAME.size(); i++) {
                // Read the stored value (returned here as a String; callers convert as needed).
                note.arrayList.add(cursor.getString(cursor.getColumnIndex(mDBMessage.LABEL_NAME.get(i))));
            }
            notes.add(note);
        } while (cursor.moveToNext());
        cursor.close();
        return notes;
    }

    /**
     * Look up the primary key of the row at a position.
     *
     * @param position  row position
     * @param condition optional selection
     * @return the key, or -1 if not found
     */
    protected long getKey(int position, String condition) {
        long key = -1;
        Cursor cursor = mDB.query(true, mDBMessage.TABLE_NAME,
                new String[]{mDBMessage.PRIMARY_KEY}, condition,
                null, null, null, mDBMessage.PRIMARY_KEY + " DESC", null);
        if (cursor != null && cursor.getCount() > 0) {
            cursor.moveToPosition(position);
            key = cursor.getLong(cursor.getColumnIndex(mDBMessage.PRIMARY_KEY));
            cursor.close();
        }
        return key;
    }

    private class DataBaseHelper extends SQLiteOpenHelper {

        public DataBaseHelper(Context context, String name, int version) {
            super(context, name, null, version);
        }

        public DataBaseHelper(Context context, String name,
                              SQLiteDatabase.CursorFactory factory, int version) {
            super(context, name, factory, version);
        }

        @Override
        public void onCreate(SQLiteDatabase db) {
            db.execSQL(mDBMessage.CREATE_SQL);
            Log.d("create", mDBMessage.CREATE_SQL);
        }

        @Override
        public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
            db.execSQL("DROP TABLE IF EXISTS " + mDBMessage.TABLE_NAME);
            onCreate(db);
        }
    }

    /**
     * SQL metadata holder.
     */
    public class DataBaseMessage {
        String SQL_NAME;
        // CREATE TABLE statement
        String CREATE_SQL;
        // table name
        String TABLE_NAME;
        // primary key column
        String PRIMARY_KEY;
        // remaining column names
        ArrayList<String> LABEL_NAME;

        public DataBaseMessage() {
            LABEL_NAME = new ArrayList<>();
        }
    }

    /**
     * Reflection helper: builds the CREATE TABLE statement from an annotated class.
     *
     * @param clazz entity class annotated with TableName/LabelName
     * @return the populated metadata
     */
    public DataBaseMessage getCreateSQL(Class<?> clazz) {
        DataBaseMessage msg = new DataBaseMessage();
        StringBuilder builder = new StringBuilder();
        builder.append("CREATE TABLE ");
        if (clazz.isAnnotationPresent(TableName.class)) {
            TableName t = clazz.getAnnotation(TableName.class);
            if (t.ifNotExist())
                builder.append(" ").append("IF NOT EXISTS ");
            builder.append(t.tableName());
            // table name
            msg.TABLE_NAME = t.tableName();
        }
        builder.append(" (");
        Field[] fields = clazz.getDeclaredFields();
        for (int i = 0; i < fields.length; i++) {
            if (fields[i].isAnnotationPresent(LabelName.class)) {
                LabelName f = fields[i].getAnnotation(LabelName.class);
                builder.append(f.columnName());
                builder.append(" ").append(f.type());
                msg.LABEL_NAME.add(f.columnName());
                if (f.generatedId()) {
                    builder.append(" ").append("PRIMARY KEY");
                    msg.PRIMARY_KEY = f.columnName();
                    if (f.autoincrement()) {
                        builder.append(" ").append("AUTOINCREMENT");
                        msg.LABEL_NAME.remove(msg.LABEL_NAME.size() - 1);
                    }
                }
                builder.append(",");
            }
        }
        builder.delete(builder.length() - 1, builder.length());
        builder.append(") ");
        msg.CREATE_SQL = builder.toString();
        return msg;
    }
}
5,380
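A minimal usage sketch for the annotation-driven helper above (not part of the repository). It assumes TableName and LabelName live in the same package as DataBase (they are referenced there without imports), that their attributes other than columnName and type have defaults, and that Node.arrayList and Node.key are accessible from outside the package; the entity fields and SQL type strings are made up for illustration.

package com.example.demo;

import android.content.Context;

import com.lfk.justweengine.utils.database.DataBase;
import com.lfk.justweengine.utils.database.LabelName;
import com.lfk.justweengine.utils.database.Node;
import com.lfk.justweengine.utils.database.TableName;

import java.util.List;

// Hypothetical entity: getCreateSQL(Player.class) would turn this into
// CREATE TABLE IF NOT EXISTS player (id INTEGER PRIMARY KEY AUTOINCREMENT,name TEXT,score INTEGER)
@TableName(tableName = "player", ifNotExist = true)
class Player {
    // Auto-generated key: getCreateSQL() drops it from LABEL_NAME, so Node rows never carry it.
    @LabelName(columnName = "id", type = "INTEGER", generatedId = true, autoincrement = true)
    int id;
    @LabelName(columnName = "name", type = "TEXT")
    String name;
    @LabelName(columnName = "score", type = "INTEGER")
    int score;
}

class HighScoreStore {
    void demo(Context context) {
        // Build the CREATE TABLE statement from the annotations, then open the db file.
        DataBase db = DataBase.initAndOpen("game.db", Player.class);
        if (!db.open(context)) {
            return;
        }

        // Node carries values in the same order as the non-generated columns (name, score).
        Node node = new Node();
        node.arrayList.add("alice");
        node.arrayList.add(42);
        db.insert(node);              // fills node.key with the new row id

        List<Node> rows = db.query(); // newest rows first (ordered by PRIMARY_KEY DESC)
        db.close();
    }
}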
605
//===-- TestTracer.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Allows setting up a fake tracer for tests.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_UNITTESTS_SUPPORT_TESTTRACER_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_UNITTESTS_SUPPORT_TESTTRACER_H

#include "support/Trace.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <mutex>
#include <string>
#include <vector>

namespace clang {
namespace clangd {
namespace trace {

/// A RAII Tracer that can be used by tests.
class TestTracer : public EventTracer {
public:
  TestTracer() : S(*this) {}

  /// Stores all the measurements to be returned with take later on.
  void record(const Metric &Metric, double Value,
              llvm::StringRef Label) override;

  /// Returns recorded measurements for \p Metric and clears them.
  std::vector<double> takeMetric(llvm::StringRef Metric,
                                 llvm::StringRef Label = "");

private:
  std::mutex Mu;
  /// Measurements recorded per metric per label.
  llvm::StringMap<llvm::StringMap<std::vector<double>>> Measurements;
  Session S;
};

} // namespace trace
} // namespace clangd
} // namespace clang

#endif
516
1,306
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_compiler.h"

#include "base/logging.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "llvm/compiler_llvm.h"
#include "llvm/ir_builder.h"
#include "llvm/llvm_compilation_unit.h"
#include "llvm/runtime_support_llvm_func.h"
#include "llvm/utils_llvm.h"
#include "mirror/art_method.h"
#include "runtime.h"
#include "stack.h"
#include "thread.h"

#include <llvm/ADT/SmallVector.h>
#include <llvm/IR/BasicBlock.h>
#include <llvm/IR/DerivedTypes.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/Type.h>

namespace art {
namespace llvm {

using ::art::llvm::runtime_support::JniMethodEnd;
using ::art::llvm::runtime_support::JniMethodEndSynchronized;
using ::art::llvm::runtime_support::JniMethodEndWithReference;
using ::art::llvm::runtime_support::JniMethodEndWithReferenceSynchronized;
using ::art::llvm::runtime_support::JniMethodStart;
using ::art::llvm::runtime_support::JniMethodStartSynchronized;
using ::art::llvm::runtime_support::RuntimeId;

JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
                         CompilerDriver& driver,
                         const DexCompilationUnit* dex_compilation_unit)
    : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
      context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
      dex_compilation_unit_(dex_compilation_unit),
      func_(NULL), elf_func_idx_(0) {
  // Check: Ensure that JNI compiler will only get "native" method
  CHECK(dex_compilation_unit->IsNative());
}

CompiledMethod* JniCompiler::Compile() {
  const bool is_static = dex_compilation_unit_->IsStatic();
  const bool is_synchronized = dex_compilation_unit_->IsSynchronized();
  const DexFile* dex_file = dex_compilation_unit_->GetDexFile();
  DexFile::MethodId const& method_id =
      dex_file->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
  char const return_shorty = dex_file->GetMethodShorty(method_id)[0];
  ::llvm::Value* this_object_or_class_object;

  uint32_t method_idx = dex_compilation_unit_->GetDexMethodIndex();
  std::string func_name(StringPrintf("jni_%s",
                                     MangleForJni(PrettyMethod(method_idx, *dex_file)).c_str()));
  CreateFunction(func_name);

  // Set argument name
  ::llvm::Function::arg_iterator arg_begin(func_->arg_begin());
  ::llvm::Function::arg_iterator arg_end(func_->arg_end());
  ::llvm::Function::arg_iterator arg_iter(arg_begin);

  DCHECK_NE(arg_iter, arg_end);
  arg_iter->setName("method");
  ::llvm::Value* method_object_addr = arg_iter++;

  if (!is_static) {
    // Non-static, the second argument is "this object"
    this_object_or_class_object = arg_iter++;
  } else {
    // Load class object
    this_object_or_class_object =
        irb_.LoadFromObjectOffset(method_object_addr,
                                  mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                                  irb_.getJObjectTy(),
                                  kTBAAConstJObject);
  }
  // Actual argument (ignore method and this object)
  arg_begin = arg_iter;

  // Count the number of Object* arguments
  uint32_t sirt_size = 1;
  // "this" object pointer for non-static
  // "class" object pointer for static
  for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
#if !defined(NDEBUG)
    arg_iter->setName(StringPrintf("a%u", i));
#endif
    if (arg_iter->getType() == irb_.getJObjectTy()) {
      ++sirt_size;
    }
  }

  // Shadow stack
  ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(sirt_size);
  ::llvm::AllocaInst* shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);

  // Store the dex pc
  irb_.StoreToObjectOffset(shadow_frame_,
                           ShadowFrame::DexPCOffset(),
                           irb_.getInt32(DexFile::kDexNoIndex),
                           kTBAAShadowFrame);

  // Push the shadow frame
  ::llvm::Value* shadow_frame_upcast = irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
  ::llvm::Value* old_shadow_frame =
      irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, sirt_size);

  // Get JNIEnv
  ::llvm::Value* jni_env_object_addr =
      irb_.Runtime().EmitLoadFromThreadOffset(Thread::JniEnvOffset().Int32Value(),
                                              irb_.getJObjectTy(),
                                              kTBAARuntimeInfo);

  // Get callee code_addr
  ::llvm::Value* code_addr =
      irb_.LoadFromObjectOffset(method_object_addr,
                                mirror::ArtMethod::NativeMethodOffset().Int32Value(),
                                GetFunctionType(dex_compilation_unit_->GetDexMethodIndex(),
                                                is_static, true)->getPointerTo(),
                                kTBAARuntimeInfo);

  // Load actual parameters
  std::vector< ::llvm::Value*> args;

  // The 1st parameter: JNIEnv*
  args.push_back(jni_env_object_addr);

  // Variables for GetElementPtr
  ::llvm::Value* gep_index[] = {
    irb_.getInt32(0),  // No displacement for shadow frame pointer
    irb_.getInt32(1),  // SIRT
    NULL,
  };

  size_t sirt_member_index = 0;

  // Store the "this object or class object" to SIRT
  gep_index[2] = irb_.getInt32(sirt_member_index++);
  ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
                                                      irb_.getJObjectTy()->getPointerTo());
  irb_.CreateStore(this_object_or_class_object, sirt_field_addr, kTBAAShadowFrame);
  // Push the "this object or class object" to out args
  this_object_or_class_object = irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy());
  args.push_back(this_object_or_class_object);

  // Store arguments to SIRT, and push back to args
  for (arg_iter = arg_begin; arg_iter != arg_end; ++arg_iter) {
    if (arg_iter->getType() == irb_.getJObjectTy()) {
      // Store the reference type arguments to SIRT
      gep_index[2] = irb_.getInt32(sirt_member_index++);
      ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
                                                          irb_.getJObjectTy()->getPointerTo());
      irb_.CreateStore(arg_iter, sirt_field_addr, kTBAAShadowFrame);
      // Note null is placed in the SIRT but the jobject passed to the native code must be null
      // (not a pointer into the SIRT as with regular references).
      ::llvm::Value* equal_null = irb_.CreateICmpEQ(arg_iter, irb_.getJNull());
      ::llvm::Value* arg =
          irb_.CreateSelect(equal_null, irb_.getJNull(),
                            irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy()));
      args.push_back(arg);
    } else {
      args.push_back(arg_iter);
    }
  }

  ::llvm::Value* saved_local_ref_cookie;
  {  // JniMethodStart
    RuntimeId func_id = is_synchronized ? JniMethodStartSynchronized
                                        : JniMethodStart;
    ::llvm::SmallVector< ::llvm::Value*, 2> args;
    if (is_synchronized) {
      args.push_back(this_object_or_class_object);
    }
    args.push_back(irb_.Runtime().EmitGetCurrentThread());
    saved_local_ref_cookie = irb_.CreateCall(irb_.GetRuntime(func_id), args);
  }

  // Call!!!
  ::llvm::Value* retval = irb_.CreateCall(code_addr, args);

  {  // JniMethodEnd
    bool is_return_ref = return_shorty == 'L';
    RuntimeId func_id =
        is_return_ref ? (is_synchronized ? JniMethodEndWithReferenceSynchronized
                                         : JniMethodEndWithReference)
                      : (is_synchronized ? JniMethodEndSynchronized
                                         : JniMethodEnd);
    ::llvm::SmallVector< ::llvm::Value*, 4> args;
    if (is_return_ref) {
      args.push_back(retval);
    }
    args.push_back(saved_local_ref_cookie);
    if (is_synchronized) {
      args.push_back(this_object_or_class_object);
    }
    args.push_back(irb_.Runtime().EmitGetCurrentThread());

    ::llvm::Value* decoded_jobject =
        irb_.CreateCall(irb_.GetRuntime(func_id), args);

    // Return decoded jobject if return reference.
    if (is_return_ref) {
      retval = decoded_jobject;
    }
  }

  // Pop the shadow frame
  irb_.Runtime().EmitPopShadowFrame(old_shadow_frame);

  // Return!
  switch (return_shorty) {
    case 'V':
      irb_.CreateRetVoid();
      break;
    case 'Z':
    case 'C':
      irb_.CreateRet(irb_.CreateZExt(retval, irb_.getInt32Ty()));
      break;
    case 'B':
    case 'S':
      irb_.CreateRet(irb_.CreateSExt(retval, irb_.getInt32Ty()));
      break;
    default:
      irb_.CreateRet(retval);
      break;
  }

  // Verify the generated bitcode
  VERIFY_LLVM_FUNCTION(*func_);

  cunit_->Materialize();

  return new CompiledMethod(*driver_, cunit_->GetInstructionSet(), cunit_->GetElfObject(),
                            func_name);
}

void JniCompiler::CreateFunction(const std::string& func_name) {
  CHECK_NE(0U, func_name.size());

  const bool is_static = dex_compilation_unit_->IsStatic();

  // Get function type
  ::llvm::FunctionType* func_type =
      GetFunctionType(dex_compilation_unit_->GetDexMethodIndex(), is_static, false);

  // Create function
  func_ = ::llvm::Function::Create(func_type, ::llvm::Function::InternalLinkage,
                                   func_name, module_);

  // Create basic block
  ::llvm::BasicBlock* basic_block = ::llvm::BasicBlock::Create(*context_, "B0", func_);

  // Set insert point
  irb_.SetInsertPoint(basic_block);
}

::llvm::FunctionType* JniCompiler::GetFunctionType(uint32_t method_idx,
                                                   bool is_static, bool is_native_function) {
  // Get method signature
  uint32_t shorty_size;
  const char* shorty = dex_compilation_unit_->GetShorty(&shorty_size);
  CHECK_GE(shorty_size, 1u);

  // Get return type
  ::llvm::Type* ret_type = NULL;
  switch (shorty[0]) {
    case 'V': ret_type = irb_.getJVoidTy(); break;
    case 'Z':
    case 'B':
    case 'C':
    case 'S':
    case 'I': ret_type = irb_.getJIntTy(); break;
    case 'F': ret_type = irb_.getJFloatTy(); break;
    case 'J': ret_type = irb_.getJLongTy(); break;
    case 'D': ret_type = irb_.getJDoubleTy(); break;
    case 'L': ret_type = irb_.getJObjectTy(); break;
    default: LOG(FATAL) << "Unreachable: unexpected return type in shorty " << shorty;
  }

  // Get argument type
  std::vector< ::llvm::Type*> args_type;

  args_type.push_back(irb_.getJObjectTy());  // method object pointer

  if (!is_static || is_native_function) {
    // "this" object pointer for non-static
    // "class" object pointer for static native
    args_type.push_back(irb_.getJType('L'));
  }

  for (uint32_t i = 1; i < shorty_size; ++i) {
    args_type.push_back(irb_.getJType(shorty[i]));
  }

  return ::llvm::FunctionType::get(ret_type, args_type, false);
}

}  // namespace llvm
}  // namespace art
4,953
27,416
<filename>community/boilerplates/event-triggers/aws-lambda/java/echo/src/main/java/example/Note.java
package example;

class Note {
    private Integer id;
    private String note;

    Integer getId() {
        return id;
    }

    void setId(Integer id) {
        this.id = id;
    }

    String getNote() {
        return note;
    }

    void setNote(String note) {
        this.note = note;
    }
}
174
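A small sketch of how the Note POJO above might be exercised (not from the boilerplate): round-tripping it through Gson, the way a Lambda handler for a Hasura event trigger would typically decode the new row from the event payload and echo it back. The choice of Gson and the handler context are assumptions; only Note's fields come from the file above.

package example;

import com.google.gson.Gson;

class NoteJsonDemo {
    public static void main(String[] args) {
        Gson gson = new Gson();

        // Roughly what Hasura places under event.data.new for a row of the notes table.
        Note incoming = gson.fromJson("{\"id\": 1, \"note\": \"hello\"}", Note.class);

        // Echo it back as JSON, e.g. in the Lambda response body.
        String echoed = gson.toJson(incoming);
        System.out.println(echoed); // {"id":1,"note":"hello"}
    }
}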
2,670
<gh_stars>1000+
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
// Do not edit this file. It is machine generated.
{
    "refresh.process.list.tooltip": "Prozessliste aktualisieren",
    "attach.to.process": "An Prozess anhängen",
    "select.process.attach": "Prozess auswählen, an den angefügt werden soll",
    "process.not.selected": "Der Prozess wurde nicht ausgewählt."
}
175