max_stars_count (int64, 301–224k) | text (string, lengths 6–1.05M) | token_count (int64, 3–727k) |
---|---|---|
47,880 | <filename>guava-testlib/src/com/google/common/collect/testing/DerivedGenerator.java
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing;
import com.google.common.annotations.GwtCompatible;
/**
* A generator that relies on a preexisting generator for most of its work. For example, a derived
* iterator generator may delegate the work of creating the underlying collection to an inner
* collection generator.
*
* <p>{@code GwtTestSuiteGenerator} expects every {@code DerivedIterator} implementation to provide
* a one-arg constructor (accepting its inner generator as an argument). This requirement enables it
* to generate source code (since GWT cannot use reflection to generate the suites).
*
* @author <NAME>
*/
@GwtCompatible
public interface DerivedGenerator {
TestSubjectGenerator<?> getInnerGenerator();
}
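/*
 * Editorial sketch, not part of the original Guava file: a hypothetical implementation
 * illustrating the one-arg constructor convention described in the class comment above.
 * The class name and field are invented for illustration only.
 */
@GwtCompatible
class ExampleDerivedGenerator implements DerivedGenerator {
private final TestSubjectGenerator<?> innerGenerator;
ExampleDerivedGenerator(TestSubjectGenerator<?> innerGenerator) {
// The single constructor argument is the inner generator that does most of the work.
this.innerGenerator = innerGenerator;
}
@Override
public TestSubjectGenerator<?> getInnerGenerator() {
return innerGenerator;
}
}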
| 366 |
302 | <gh_stars>100-1000
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "randen.h"
#include <stdio.h>
#include <algorithm>
#include <random> // seed_seq
#include <sstream>
#define UPDATE_GOLDEN 0
#define ENABLE_VERIFY 1
#define ENABLE_DUMP 0
namespace randen {
namespace {
#define STR(x) #x
#define ASSERT_TRUE(condition) \
do { \
if (!(condition)) { \
printf("Assertion [" STR(condition) "] failed on line %d\n", __LINE__); \
abort(); \
} \
} while (false)
using EngRanden = Randen<uint64_t>;
#if ENABLE_VERIFY
void VerifyReseedChangesAllValues() {
const size_t kNumOutputs = 127;
EngRanden engine;
std::seed_seq seq1{1, 2, 3, 4, 5, 6, 7};
engine.seed(seq1);
uint64_t out1[kNumOutputs];
for (size_t i = 0; i < kNumOutputs; ++i) {
out1[i] = engine();
}
std::seed_seq seq2{127, 255, 511};
engine.seed(seq2);
uint64_t out2[kNumOutputs];
engine.seed(seq2);
for (size_t i = 0; i < kNumOutputs; ++i) {
out2[i] = engine();
ASSERT_TRUE(out2[i] != out1[i]);
}
}
void VerifyDiscard() {
const int N = 56; // two buffers' worth
for (int num_used = 0; num_used < N; ++num_used) {
EngRanden engine_used;
for (int i = 0; i < num_used; ++i) {
(void)engine_used();
}
for (int num_discard = 0; num_discard < N; ++num_discard) {
EngRanden engine1 = engine_used;
EngRanden engine2 = engine_used;
for (int i = 0; i < num_discard; ++i) {
(void)engine1();
}
engine2.discard(num_discard);
for (int i = 0; i < N; ++i) {
const uint64_t r1 = engine1();
const uint64_t r2 = engine2();
ASSERT_TRUE(r1 == r2);
}
}
}
}
void VerifyGolden() {
// prime number => some buffer values unused.
const size_t kNumOutputs = 127;
#if UPDATE_GOLDEN
EngRanden engine;
for (size_t i = 0; i < kNumOutputs; ++i) {
printf("0x%016lx,\n", engine());
}
printf("\n");
#else
const uint64_t golden[kNumOutputs] = {
0xdda9f47cd90410ee, 0xc3c14f134e433977, 0xf0b780f545c72912,
0x887bf3087fd8ca10, 0x30ec63baff3c6d59, 0x15dbb1d37696599f,
0x02808a316f49a54c, 0xb29f73606f7f20a6, 0x9cbf605e3fd9de8a,
0x3b8feaf9d5c8e50e, 0xd8b2ffd356301ed5, 0xc970ae1a78183bbb,
0xcdfd8d76eb8f9a19, 0xf4b327fe0fc73c37, 0xd5af05dd3eff9556,
0xc3a506eb91420c9d, 0x7023920e0d6bfe8c, 0x48db1bb78f83c4a1,
0xed1ef4c26b87b840, 0x58d3575834956d42, 0x497cabf3431154fc,
0x8eef32a23e0b2df3, 0xd88b5749f090e5ea, 0x4e24370570029a8b,
0x78fcec2cbb6342f5, 0xc651a582a970692f, 0x352ee4ad1816afe3,
0x463cb745612f55db, 0x811ef0821c3de851, 0x026ff374c101da7e,
0xa0660379992d58fc, 0x6f7e616704c4fa59, 0x915f3445685da798,
0x04b0a374a3b795c7, 0x4663352533ce1882, 0x26802a8ac76571ce,
0x5588ba3a4d6e6c51, 0xb9fdefb4a24dc738, 0x607195a5e200f5fd,
0xa2101a42d35f1956, 0xe1e5e03c759c0709, 0x7e100308f3290764,
0xcbcf585399e432f1, 0x082572cc5da6606f, 0x0904469acbfee8f2,
0xe8a2be4f8335d8f1, 0x08e8a1f1a69da69a, 0xf08bd31b6daecd51,
0x2e9705bb053d6b46, 0x6542a20aad57bff5, 0x78e3a810213b6ffb,
0xda2fc9db0713c391, 0xc0932718cd55781f, 0xdc16a59cdd85f8a6,
0xb97289c1be0f2f9c, 0xb9bfb29c2b20bfe5, 0x5524bb834771435b,
0xc0a2a0e403a892d4, 0xff4af3ab8d1b78c5, 0x8265da3d39d1a750,
0x66e455f627495189, 0xf0ec5f424bcad77f, 0x3424e47dc22596e3,
0xc82d3120b57e3270, 0xc191c595afc4dcbf, 0xbc0c95129ccedcdd,
0x7f90650ea6cd6ab4, 0x120392bd2bb70939, 0xa7c8fac5a7917eb0,
0x7287491832695ad3, 0x7c1bf9839c7c1ce5, 0xd088cb9418be0361,
0x78565cdefd28c4ad, 0xe2e991fa58e1e79e, 0x2a9eac28b08c96bf,
0x7351b9fef98bafad, 0x13a685861bab87e0, 0x6c4f179696cb2225,
0x30537425cac70991, 0x64c6de5aa0501971, 0x7e05e3aa8ec720dc,
0x01590d9dc6c532b7, 0x738184388f3bc1d2, 0x74a07d9c54e3e63f,
0x6bcdf185561f255f, 0x26ffdc5067be3acb, 0x171df81934f68604,
0xa0eaf2e1cf99b1c6, 0x5d1cb02075ba1cea, 0x7ea5a21665683e5a,
0xba6364eff80de02f, 0x957f38cbd2123fdf, 0x892d8317de82f7a2,
0x606e0a0e41d452ee, 0x4eb28826766fcf5b, 0xe707b1db50f7b43e,
0x6ee217df16527d78, 0x5a362d56e80a0951, 0x443e63857d4076ca,
0xf6737962ba6b23dd, 0xd796b052151ee94d, 0x790d9a5f048adfeb,
0x8b833ff84893da5d, 0x033ed95c12b04a03, 0x9877c4225061ca76,
0x3d6724b1bb15eab9, 0x42e5352fe30ce989, 0xd68d6810adf74fb3,
0x3cdbf7e358df4b8b, 0x265b565a7431fde7, 0x52d2242f65b37f88,
0x2922a47f6d3e8779, 0x29d40f00566d5e26, 0x5d836d6e2958d6b5,
0x6c056608b7d9c1b6, 0x288db0e1124b14a0, 0x8fb946504faa6c9d,
0x0b9471bdb8f19d32, 0xfd1fe27d144a09e0, 0x8943a9464540251c,
0x8048f217633fce36, 0xea6ac458da141bda, 0x4334b8b02ff7612f,
0xfeda1384ade74d31, 0x096d119a3605c85b, 0xdbc8441f5227e216,
0x541ad7efa6ddc1d3};
EngRanden engine;
for (size_t i = 0; i < kNumOutputs; ++i) {
ASSERT_TRUE(golden[i] == engine());
}
#endif
}
#endif // ENABLE_VERIFY
void VerifyRandReqEngine() {
// Validates that Randen satisfies [rand.req.engine].
// Names follow the definition of [rand.req.engine] in the C++ standard.
// e is a value of E
// v is an lvalue of E
// x, y are possibly const values of E
// s is a value of T
// q is a value satisfying the requirements of seed_sequence
// z is a value of type unsigned long long
// os is some specialization of basic_ostream
// is is some specialization of basic_istream
using E = EngRanden;
using T = typename EngRanden::result_type;
static_assert(std::is_copy_constructible<E>::value,
"Randen must be copy constructible");
static_assert(std::is_copy_assignable<E>::value,
"Randen must be copy assignable");
E e, v;
const E x, y;
T s = 1;
std::seed_seq q{1, 2, 3};
unsigned long long z = 1; // NOLINT(runtime/int)
std::wostringstream os;
std::wistringstream is;
E{};
E{x};
E{s};
E{q};
// Verify that seed() and default-construction are identical.
e.seed();
{
E f;
ASSERT_TRUE(e == f);
}
// Verify the seed() result type.
static_assert(std::is_same<decltype(e.seed(s)), void>::value,
"return type of seed() must be void");
static_assert(std::is_same<decltype(e.seed(q)), void>::value,
"return type of seed() must be void");
// Verify that seeding via seed_sequence and constructing via seed_sequence
// are identical.
e.seed(q);
{
E f{q};
ASSERT_TRUE(e == f);
}
// Verify the operator() result type.
static_assert(std::is_same<decltype(e()), T>::value,
"return type of operator() must be result_type");
// Verify that once the state has advanced, the engines
// are no longer equal.
e();
{
E f{q};
ASSERT_TRUE(e != f);
}
{
E f;
ASSERT_TRUE(e != f);
}
// Verify discard.
e.discard(z);
{
// The state equivalence should change.
E f, g;
f.discard(2);
ASSERT_TRUE(f != g);
g();
g();
ASSERT_TRUE(f == g);
}
// Verify operator == result types.
static_assert(std::is_same<decltype(x == y), bool>::value,
"return type of operator== must be bool");
static_assert(std::is_same<decltype(x != y), bool>::value,
"return type of operator!= must be bool");
// Verify operator<<() result.
{
auto& os2 = (os << e);
ASSERT_TRUE(&os2 == &os);
}
// Verify operator>>() result.
{
auto& is2 = (is >> e);
ASSERT_TRUE(&is2 == &is);
}
}
void VerifyStreamOperators() {
EngRanden engine1(171);
EngRanden engine2;
{
std::stringstream stream;
stream << engine1;
stream >> engine2;
}
const int N = 56; // two buffers' worth
for (int i = 0; i < N; ++i) {
const uint64_t r1 = engine1();
const uint64_t r2 = engine2();
ASSERT_TRUE(r1 == r2);
}
}
void Verify() {
#if ENABLE_VERIFY
VerifyReseedChangesAllValues();
VerifyDiscard();
VerifyGolden();
VerifyRandReqEngine();
VerifyStreamOperators();
#endif
}
void DumpOutput() {
#if ENABLE_DUMP
const size_t kNumOutputs = 1500 * 1000 * 1000;
std::vector<uint64_t> outputs(kNumOutputs);
EngRanden engine;
for (size_t i = 0; i < kNumOutputs; ++i) {
outputs[i] = engine();
}
FILE* f = fopen("/tmp/randen.bin", "wb");
if (f != nullptr) {
fwrite(outputs.data(), kNumOutputs, 8, f);
fclose(f);
}
#endif // ENABLE_DUMP
}
void RunAll() {
// Immediately output any results (for non-local runs).
setvbuf(stdout, nullptr, _IONBF, 0);
Verify();
DumpOutput();
}
} // namespace
} // namespace randen
int main(int argc, char* argv[]) {
randen::RunAll();
return 0;
}
| 4,726 |
1,927 | <reponame>alimy/scene<gh_stars>1000+
/*
* Copyright (C) 2019 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bytedance.scene;
import android.app.Activity;
import android.app.Application;
import androidx.annotation.MainThread;
import androidx.annotation.NonNull;
import androidx.lifecycle.ViewModelProvider;
public class SceneViewModelProviders {
private SceneViewModelProviders() {
}
private static Application checkApplication(Activity activity) {
Application application = activity.getApplication();
if (application == null) {
throw new IllegalStateException("Your activity is not yet attached to "
+ "Application. You can't request ViewModel before onCreate call.");
}
return application;
}
private static Activity checkActivity(Scene scene) {
Activity activity = scene.getActivity();
if (activity == null) {
throw new IllegalStateException("Can't create ViewModelProvider for removed scene");
}
return activity;
}
@MainThread
public static ViewModelProvider of(@NonNull Scene scene) {
ViewModelProvider.AndroidViewModelFactory factory =
ViewModelProvider.AndroidViewModelFactory.getInstance(
checkApplication(checkActivity(scene)));
return new ViewModelProvider(scene.getViewModelStore(), factory);
}
@MainThread
public static ViewModelProvider of(@NonNull Scene scene, @NonNull ViewModelProvider.Factory factory) {
return new ViewModelProvider(scene.getViewModelStore(), factory);
}
}
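// Hypothetical usage sketch (editorial addition, not part of the library source): from code
// running inside a Scene, a ViewModel could be obtained roughly as
//   MyViewModel vm = SceneViewModelProviders.of(this).get(MyViewModel.class);
// where MyViewModel is an invented androidx.lifecycle.ViewModel subclass.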
| 693 |
335 | <reponame>Safal08/Hacktoberfest-1
{
"word": "Ligand",
"definitions": [
"An ion or molecule attached to a metal atom by coordinate bonding.",
"A molecule that binds to another (usually larger) molecule."
],
"parts-of-speech": "Noun"
} | 104 |
2,637 | <reponame>vinh107108/amazon-freertos<filename>demos/device_defender_for_aws/defender_demo.c
/*
* FreeRTOS V202107.00
* Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://aws.amazon.com/freertos
* http://www.FreeRTOS.org
*/
/* Standard includes. */
#include <stdlib.h>
#include <string.h>
/* Demo config. */
#include "defender_demo_config.h"
/* Metrics collector. */
#include "metrics_collector.h"
/* Report builder. */
#include "report_builder.h"
/* MQTT operations. */
#include "mqtt_demo_helpers.h"
/* JSON Library. */
#include "core_json.h"
/* Device Defender Client Library. */
#include "defender.h"
/**
* @brief Predefined thing name.
*
* This is an example predefined thing name and could be compiled into ROM code.
*/
#define THING_NAME clientcredentialIOT_THING_NAME
/**
* @brief The length of #THING_NAME.
*/
#define THING_NAME_LENGTH ( ( uint16_t ) ( sizeof( THING_NAME ) - 1 ) )
/**
* @brief Number of seconds to wait for the response from AWS IoT Device
* Defender service.
*/
#define DEFENDER_RESPONSE_WAIT_SECONDS ( 2 )
/**
* @brief The maximum number of times to run the loop in this demo.
*/
#ifndef DEFENDER_DEMO_MAX_ATTEMPTS
#define DEFENDER_DEMO_MAX_ATTEMPTS ( 3 )
#endif
/**
* @brief Time in ticks to wait between each iteration of the demo execution,
* in case a retry is required from demo execution failure.
*/
#define DELAY_BETWEEN_DEMO_ATTEMPTS_TICKS ( pdMS_TO_TICKS( 5000U ) )
/**
* @brief Status values of the device defender report.
*/
typedef enum
{
ReportStatusNotReceived,
ReportStatusAccepted,
ReportStatusRejected
} ReportStatus_t;
/**
* @brief Each compilation unit that consumes the NetworkContext must define it.
* It should contain a single pointer to the type of your desired transport.
* When using multiple transports in the same compilation unit, define this pointer as void *.
*
* @note Transport stacks are defined in amazon-freertos/libraries/abstractions/transport/secure_sockets/transport_secure_sockets.h.
*/
struct NetworkContext
{
SecureSocketsTransportParams_t * pParams;
};
/*-----------------------------------------------------------*/
/**
* @brief The MQTT context used for MQTT operation.
*/
static MQTTContext_t mqttContext;
/**
* @brief The network context used for OpenSSL operation.
*/
static NetworkContext_t networkContext;
/**
* @brief Static buffer used to hold MQTT messages being sent and received.
*/
static uint8_t sharedBuffer[ NETWORK_BUFFER_SIZE ];
/**
* @brief Static buffer used to hold MQTT messages being sent and received.
*/
static MQTTFixedBuffer_t xBuffer =
{
sharedBuffer,
NETWORK_BUFFER_SIZE
};
/**
* @brief Network Stats.
*/
static NetworkStats_t networkStats;
/**
* @brief Open TCP ports array.
*/
static uint16_t openTcpPorts[ OPEN_TCP_PORTS_ARRAY_SIZE ];
/**
* @brief Open UDP ports array.
*/
static uint16_t openUdpPorts[ OPEN_UDP_PORTS_ARRAY_SIZE ];
/**
* @brief Established connections array.
*/
static Connection_t establishedConnections[ ESTABLISHED_CONNECTIONS_ARRAY_SIZE ];
/**
* @brief All the metrics sent in the device defender report.
*/
static ReportMetrics_t deviceMetrics;
/**
* @brief Report status.
*/
static ReportStatus_t reportStatus;
/**
* @brief Buffer for generating the device defender report.
*/
static char deviceMetricsJsonReport[ DEVICE_METRICS_REPORT_BUFFER_SIZE ];
/**
* @brief Report Id sent in the defender report.
*/
static uint32_t reportId = 0;
/*-----------------------------------------------------------*/
/**
* @brief Callback to receive the incoming publish messages from the MQTT broker.
*
* @param[in] pMqttContext MQTT context pointer.
* @param[in] pPacketInfo Information on the type of incoming MQTT packet.
* @param[in] pDeserializedInfo Deserialized information from incoming packet.
*/
static void publishCallback( MQTTContext_t * pMqttContext,
MQTTPacketInfo_t * pPacketInfo,
MQTTDeserializedInfo_t * pDeserializedInfo );
/**
* @brief Collect all the metrics to be sent in the device defender report.
*
* On success, caller is responsible for freeing deviceMetrics.pTaskStatusArray.
*
* @return pdPASS if all the metrics are successfully collected;
* pdFAIL otherwise.
*/
static BaseType_t collectDeviceMetrics( void );
/**
* @brief Generate the device defender report.
*
* @param[out] pOutReportLength Length of the device defender report.
*
* @return pdPASS if the report is generated successfully;
* pdFAIL otherwise.
*/
static BaseType_t generateDeviceMetricsReport( size_t * pOutReportLength );
/**
* @brief Subscribe to the device defender topics.
*
* @param[in] pMqttContext MQTT context pointer.
*
* @return pdPASS if the subscribe is successful;
* pdFAIL otherwise.
*/
static BaseType_t subscribeToDefenderTopics( MQTTContext_t * pMqttContext );
/**
* @brief Unsubscribe from the device defender topics.
*
* @param[in] pMqttContext MQTT context pointer.
*
* @return pdPASS if the unsubscribe is successful;
* pdFAIL otherwise.
*/
static BaseType_t unsubscribeFromDefenderTopics( MQTTContext_t * pMqttContext );
/**
* @brief Validate the response received from the AWS IoT Device Defender Service.
*
* This functions checks that a valid JSON is received and the value of reportId
* is same as was sent in the published report.
*
* @param[in] defenderResponse The defender response to validate.
* @param[in] defenderResponseLength Length of the defender response.
*
* @return true if the response is valid;
* false otherwise.
*/
static bool validateDefenderResponse( const char * defenderResponse,
size_t defenderResponseLength );
/*-----------------------------------------------------------*/
static bool validateDefenderResponse( const char * defenderResponse,
size_t defenderResponseLength )
{
bool status = false;
JSONStatus_t jsonResult = JSONSuccess;
char * reportIdString = NULL;
size_t reportIdStringLength;
uint32_t reportIdInResponse;
/* Is the response a valid JSON? */
jsonResult = JSON_Validate( defenderResponse, defenderResponseLength );
if( jsonResult != JSONSuccess )
{
LogError( ( "Invalid response from AWS IoT Device Defender Service: %.*s.",
( int ) defenderResponseLength,
defenderResponse ) );
}
if( jsonResult == JSONSuccess )
{
/* Search for the reportId key in the response. */
jsonResult = JSON_Search( ( char * ) defenderResponse,
defenderResponseLength,
"reportId",
sizeof( "reportId" ) - 1,
&( reportIdString ),
&( reportIdStringLength ) );
if( jsonResult != JSONSuccess )
{
LogError( ( "reportId key not found in the response from the"
"AWS IoT Device Defender Service: %.*s.",
( int ) defenderResponseLength,
defenderResponse ) );
}
}
if( jsonResult == JSONSuccess )
{
reportIdInResponse = ( uint32_t ) strtoul( reportIdString, NULL, 10 );
/* Is the reportId present in the response same as was sent in the
* published report? */
if( reportIdInResponse == reportId )
{
LogInfo( ( "A valid response with reportId %lu received from the "
"AWS IoT Device Defender Service.",
( unsigned long ) reportId ) );
status = true;
}
else
{
LogError( ( "Unexpected reportId found in the response from the AWS"
"IoT Device Defender Service. Expected: %lu, Found: %lu, "
"Complete Response: %.*s.",
( unsigned long ) reportIdInResponse,
( unsigned long ) reportId,
( int ) defenderResponseLength,
defenderResponse ) );
}
}
return status;
}
/*-----------------------------------------------------------*/
static void publishCallback( MQTTContext_t * pMqttContext,
MQTTPacketInfo_t * pPacketInfo,
MQTTDeserializedInfo_t * pDeserializedInfo )
{
DefenderStatus_t status;
DefenderTopic_t api;
bool validationResult;
MQTTPublishInfo_t * pPublishInfo = pDeserializedInfo->pPublishInfo;
/* Silence compiler warnings about unused variables. */
( void ) pMqttContext;
/* Handle incoming publish. The lower 4 bits of the publish packet
* type are used for the dup, QoS, and retain flags. Hence the lower
* bits are masked out to check whether the packet is a publish. */
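/* Worked example (editorial, not from the original demo): a PUBLISH packet with DUP=0, QoS=1,
* RETAIN=0 arrives with a packet-type byte of 0x32; 0x32 & 0xF0 yields 0x30, which equals
* MQTT_PACKET_TYPE_PUBLISH, so the branch below is taken. */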
if( ( pPacketInfo->type & 0xF0U ) == MQTT_PACKET_TYPE_PUBLISH )
{
status = Defender_MatchTopic( pPublishInfo->pTopicName,
pPublishInfo->topicNameLength,
&( api ),
NULL,
NULL );
if( status == DefenderSuccess )
{
if( api == DefenderJsonReportAccepted )
{
/* Check if the response is valid and is for the report we published. */
validationResult = validateDefenderResponse( pPublishInfo->pPayload,
pPublishInfo->payloadLength );
if( validationResult == true )
{
LogInfo( ( "The defender report was accepted by the service. Response: %.*s.",
( int ) pPublishInfo->payloadLength,
( const char * ) pPublishInfo->pPayload ) );
reportStatus = ReportStatusAccepted;
}
}
else if( api == DefenderJsonReportRejected )
{
/* Check if the response is valid and is for the report we published. */
validationResult = validateDefenderResponse( pPublishInfo->pPayload,
pPublishInfo->payloadLength );
if( validationResult == true )
{
LogError( ( "The defender report was rejected by the service. Response: %.*s.",
( int ) pPublishInfo->payloadLength,
( const char * ) pPublishInfo->pPayload ) );
reportStatus = ReportStatusRejected;
}
}
else
{
LogError( ( "Unexpected defender API : %d.", api ) );
}
}
else
{
LogError( ( "Unexpected publish message received. Topic: %.*s, Payload: %.*s.",
( int ) pPublishInfo->topicNameLength,
( const char * ) pPublishInfo->pTopicName,
( int ) pPublishInfo->payloadLength,
( const char * ) ( pPublishInfo->pPayload ) ) );
}
}
else
{
vHandleOtherIncomingPacket( pPacketInfo, pDeserializedInfo->packetIdentifier );
}
}
/*-----------------------------------------------------------*/
static BaseType_t collectDeviceMetrics( void )
{
BaseType_t status = pdFAIL;
MetricsCollectorStatus_t metricsCollectorStatus;
size_t numOpenTcpPorts, numOpenUdpPorts, numEstablishedConnections;
UBaseType_t tasksWritten = 0U;
TaskStatus_t taskStatus = { 0 };
TaskStatus_t * pTaskStatusArray = NULL;
UBaseType_t numTasksRunning;
/* Collect bytes and packets sent and received. */
metricsCollectorStatus = GetNetworkStats( &( networkStats ) );
if( metricsCollectorStatus != MetricsCollectorSuccess )
{
LogError( ( "GetNetworkStats failed. Status: %d.",
metricsCollectorStatus ) );
}
/* Collect a list of open TCP ports. */
if( metricsCollectorStatus == MetricsCollectorSuccess )
{
metricsCollectorStatus = GetOpenTcpPorts( &( openTcpPorts[ 0 ] ),
OPEN_TCP_PORTS_ARRAY_SIZE,
&( numOpenTcpPorts ) );
if( metricsCollectorStatus != MetricsCollectorSuccess )
{
LogError( ( "GetOpenTcpPorts failed. Status: %d.",
metricsCollectorStatus ) );
}
}
/* Collect a list of open UDP ports. */
if( metricsCollectorStatus == MetricsCollectorSuccess )
{
metricsCollectorStatus = GetOpenUdpPorts( &( openUdpPorts[ 0 ] ),
OPEN_UDP_PORTS_ARRAY_SIZE,
&( numOpenUdpPorts ) );
if( metricsCollectorStatus != MetricsCollectorSuccess )
{
LogError( ( "GetOpenUdpPorts failed. Status: %d.",
metricsCollectorStatus ) );
}
}
/* Collect a list of established connections. */
if( metricsCollectorStatus == MetricsCollectorSuccess )
{
metricsCollectorStatus = GetEstablishedConnections( &( establishedConnections[ 0 ] ),
ESTABLISHED_CONNECTIONS_ARRAY_SIZE,
&( numEstablishedConnections ) );
if( metricsCollectorStatus != MetricsCollectorSuccess )
{
LogError( ( "GetEstablishedConnections failed. Status: %d.",
metricsCollectorStatus ) );
}
}
if( metricsCollectorStatus == MetricsCollectorSuccess )
{
/* Get task count */
numTasksRunning = uxTaskGetNumberOfTasks();
/* Allocate pTaskStatusArray */
pTaskStatusArray = pvPortMalloc( numTasksRunning * sizeof( TaskStatus_t ) );
if( pTaskStatusArray == NULL )
{
LogError( ( "Cannot allocate memory for pTaskStatusArray array: pvPortMalloc() failed." ) );
metricsCollectorStatus = MetricsCollectorCollectionFailed;
}
}
/* Collect custom metrics from the system to send to AWS IoT Device Defender.
* This demo sends this task's stack high water mark as a "number" type
* custom metric and the current task ids as a "list of numbers" type custom
* metric. */
if( metricsCollectorStatus == MetricsCollectorSuccess )
{
/* Get the current task's status information. The usStackHighWaterMark
* field of the task status will be included in the report as a "number"
* custom metric. */
vTaskGetInfo(
/* NULL has the function query this task. */
NULL,
&taskStatus,
/* Include the stack high water mark value. */
pdTRUE,
/* Don't include the task state in the TaskStatus_t structure. */
0 );
/* Get the task status information for all running tasks. The task IDs
* of each task is then extracted to include in the report as a "list of
* numbers" custom metric */
tasksWritten = uxTaskGetSystemState( pTaskStatusArray, numTasksRunning, NULL );
if( tasksWritten == 0 )
{
/* If 0 is returned, the buffer was too small. This line is reached
* when we hit the race condition where tasks have been added since
* we got the result of uxTaskGetNumberOfTasks() */
metricsCollectorStatus = MetricsCollectorCollectionFailed;
LogError( ( "Failed to collect task IDs. uxTaskGetSystemState() failed due to insufficient buffer space." ) );
}
}
/* Populate device metrics. */
if( metricsCollectorStatus == MetricsCollectorSuccess )
{
status = pdPASS;
deviceMetrics.pNetworkStats = &( networkStats );
deviceMetrics.pOpenTcpPortsArray = &( openTcpPorts[ 0 ] );
deviceMetrics.openTcpPortsArrayLength = numOpenTcpPorts;
deviceMetrics.pOpenUdpPortsArray = &( openUdpPorts[ 0 ] );
deviceMetrics.openUdpPortsArrayLength = numOpenUdpPorts;
deviceMetrics.pEstablishedConnectionsArray = &( establishedConnections[ 0 ] );
deviceMetrics.establishedConnectionsArrayLength = numEstablishedConnections;
deviceMetrics.stackHighWaterMark = taskStatus.usStackHighWaterMark;
deviceMetrics.pTaskStatusArray = pTaskStatusArray;
deviceMetrics.taskStatusArrayLength = tasksWritten;
}
else
{
/* Free pTaskStatusArray if we allocated it but did not add it to the
* deviceMetrics struct. */
if( pTaskStatusArray != NULL )
{
vPortFree( pTaskStatusArray );
}
}
return status;
}
/*-----------------------------------------------------------*/
static BaseType_t subscribeToDefenderTopics( MQTTContext_t * pMqttContext )
{
BaseType_t status = pdFAIL;
status = SubscribeToTopic( pMqttContext,
DEFENDER_API_JSON_ACCEPTED( THING_NAME ),
DEFENDER_API_LENGTH_JSON_ACCEPTED( THING_NAME_LENGTH ) );
if( status == pdPASS )
{
status = SubscribeToTopic( pMqttContext,
DEFENDER_API_JSON_REJECTED( THING_NAME ),
DEFENDER_API_LENGTH_JSON_REJECTED( THING_NAME_LENGTH ) );
}
return status;
}
/*-----------------------------------------------------------*/
static BaseType_t unsubscribeFromDefenderTopics( MQTTContext_t * pMqttContext )
{
BaseType_t status = pdFAIL;
status = UnsubscribeFromTopic( pMqttContext,
DEFENDER_API_JSON_ACCEPTED( THING_NAME ),
DEFENDER_API_LENGTH_JSON_ACCEPTED( THING_NAME_LENGTH ) );
if( status == pdPASS )
{
status = UnsubscribeFromTopic( pMqttContext,
DEFENDER_API_JSON_REJECTED( THING_NAME ),
DEFENDER_API_LENGTH_JSON_REJECTED( THING_NAME_LENGTH ) );
}
return status;
}
/*-----------------------------------------------------------*/
static BaseType_t generateDeviceMetricsReport( size_t * pOutReportLength )
{
BaseType_t status = pdFAIL;
ReportBuilderStatus_t reportBuilderStatus;
/* Generate the metrics report in the format expected by the AWS IoT Device
* Defender Service. */
reportBuilderStatus = GenerateJsonReport( &( deviceMetricsJsonReport[ 0 ] ),
DEVICE_METRICS_REPORT_BUFFER_SIZE,
&( deviceMetrics ),
DEVICE_METRICS_REPORT_MAJOR_VERSION,
DEVICE_METRICS_REPORT_MINOR_VERSION,
reportId,
pOutReportLength );
if( reportBuilderStatus != ReportBuilderSuccess )
{
LogError( ( "GenerateJsonReport failed. Status: %d.",
reportBuilderStatus ) );
}
else
{
LogDebug( ( "Generated Report: %.*s.",
( int ) ( *pOutReportLength ),
&( deviceMetricsJsonReport[ 0 ] ) ) );
status = pdPASS;
}
return status;
}
/*-----------------------------------------------------------*/
/**
* @brief The function that runs the Defender demo, called by the demo runner.
*
* @param[in] awsIotMqttMode Ignored for the Defender demo.
* @param[in] pIdentifier Ignored for the Defender demo.
* @param[in] pNetworkServerInfo Ignored for the Defender demo.
* @param[in] pNetworkCredentialInfo Ignored for the Defender demo.
* @param[in] pNetworkInterface Ignored for the Defender demo.
*
* @return `EXIT_SUCCESS` if the demo completes successfully; `EXIT_FAILURE` otherwise.
*/
int RunDeviceDefenderDemo( bool awsIotMqttMode,
const char * pIdentifier,
void * pNetworkServerInfo,
void * pNetworkCredentialInfo,
const void * pNetworkInterface )
{
BaseType_t demoStatus = pdFAIL;
size_t reportLength = 0U, i;
bool mqttSessionEstablished = false;
UBaseType_t demoRunCount = 0;
BaseType_t retryDemoLoop = pdFALSE;
( void ) awsIotMqttMode;
( void ) pIdentifier;
( void ) pNetworkServerInfo;
( void ) pNetworkCredentialInfo;
( void ) pNetworkInterface;
/* This demo runs a single loop unless there are failures in the demo execution.
* In case of failures in the demo execution, demo loop will be retried for up to
* DEFENDER_DEMO_MAX_ATTEMPTS times. */
do
{
/* Start with report not received. */
reportStatus = ReportStatusNotReceived;
/* Set a report Id to be used.
*
* !!!NOTE!!!
* This demo sets the report ID to xTaskGetTickCount(), which may collide
* if the device is reset. Reports for a Thing with a previously used
* report ID will be assumed to be duplicates and discarded by the Device
* Defender service. The report ID needs to be unique per report sent with
* a given Thing. We recommend using an increasing unique id such as the
* current timestamp. */
reportId = ( uint32_t ) xTaskGetTickCount();
LogInfo( ( "Establishing MQTT session..." ) );
demoStatus = EstablishMqttSession( &mqttContext,
&networkContext,
&xBuffer,
publishCallback );
if( demoStatus == pdFAIL )
{
LogError( ( "Failed to establish MQTT session." ) );
}
else
{
mqttSessionEstablished = true;
}
if( demoStatus == pdPASS )
{
LogInfo( ( "Subscribing to defender topics..." ) );
demoStatus = subscribeToDefenderTopics( &mqttContext );
if( demoStatus == pdFAIL )
{
LogError( ( "Failed to subscribe to defender topics." ) );
}
}
if( demoStatus == pdPASS )
{
LogInfo( ( "Collecting device metrics..." ) );
demoStatus = collectDeviceMetrics();
if( demoStatus == pdFAIL )
{
LogError( ( "Failed to collect device metrics." ) );
}
}
if( demoStatus == pdPASS )
{
LogInfo( ( "Generating device defender report..." ) );
demoStatus = generateDeviceMetricsReport( &( reportLength ) );
/* Free the allocated array in deviceMetrics struct which is not
* used anymore after generateDeviceMetricsReport(). This code is
* only reached when collectDeviceMetrics succeeded, so
* deviceMetrics.pTaskStatusArray is a valid allocation that needs
* to be freed. */
vPortFree( deviceMetrics.pTaskStatusArray );
if( demoStatus == pdFAIL )
{
LogError( ( "Failed to generate device defender report." ) );
}
}
if( demoStatus == pdPASS )
{
LogInfo( ( "Publishing device defender report..." ) );
demoStatus = PublishToTopic( &mqttContext,
DEFENDER_API_JSON_PUBLISH( THING_NAME ),
DEFENDER_API_LENGTH_JSON_PUBLISH( THING_NAME_LENGTH ),
&( deviceMetricsJsonReport[ 0 ] ),
reportLength );
if( demoStatus == pdFAIL )
{
LogError( ( "Failed to publish device defender report." ) );
}
}
if( demoStatus == pdPASS )
{
/* Note that PublishToTopic already called MQTT_ProcessLoop, therefore
* responses may have been received and the publishCallback may have
* been called. */
for( i = 0; i < DEFENDER_RESPONSE_WAIT_SECONDS; i++ )
{
/* reportStatus is updated in the publishCallback. */
if( reportStatus != ReportStatusNotReceived )
{
break;
}
( void ) ProcessLoop( &mqttContext, 1000 );
}
}
if( reportStatus == ReportStatusNotReceived )
{
LogError( ( "Failed to receive response from AWS IoT Device Defender Service." ) );
demoStatus = pdFAIL;
}
/* Unsubscribe and disconnect if MQTT session was established. Per the MQTT
* protocol spec, it is okay to send UNSUBSCRIBE even if no corresponding
* subscription exists on the broker. Therefore, it is okay to attempt the
* unsubscribe even if one or more subscribes failed earlier. */
if( mqttSessionEstablished )
{
LogInfo( ( "Unsubscribing from defender topics..." ) );
demoStatus = unsubscribeFromDefenderTopics( &mqttContext );
if( demoStatus == pdFAIL )
{
LogError( ( "Failed to unsubscribe from defender topics." ) );
}
LogInfo( ( "Closing MQTT session..." ) );
( void ) DisconnectMqttSession( &mqttContext, &networkContext );
}
/* Increment the demo run count. */
demoRunCount++;
if( ( demoStatus == pdPASS ) && ( reportStatus == ReportStatusAccepted ) )
{
LogInfo( ( "Demo completed successfully." ) );
/* Reset the flag for demo retry. */
retryDemoLoop = pdFALSE;
}
else
{
demoStatus = pdFAIL;
if( demoRunCount < DEFENDER_DEMO_MAX_ATTEMPTS )
{
LogWarn( ( "Demo iteration %lu failed. Retrying...",
( unsigned long ) demoRunCount ) );
retryDemoLoop = pdTRUE;
/* Clear the flag indicating successful MQTT session establishment
* before attempting a retry. */
mqttSessionEstablished = false;
LogInfo( ( "A short delay before the next demo iteration." ) );
vTaskDelay( DELAY_BETWEEN_DEMO_ATTEMPTS_TICKS );
}
else
{
LogError( ( "All %lu demo iterations failed.",
( unsigned long ) DEFENDER_DEMO_MAX_ATTEMPTS ) );
retryDemoLoop = pdFALSE;
}
}
} while( retryDemoLoop == pdTRUE );
return( ( demoStatus == pdPASS ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
/*-----------------------------------------------------------*/
| 12,185 |
473 | <reponame>pingjuiliao/cb-multios
#!/usr/bin/env python
#
# Copyright (C) 2014 <NAME> <<EMAIL>>
#
from generator.actions import Actions
import random
class Contact():
first = ""
def genderToStr(self):
return {"M":"Male","F":"Female","T":"Trans-Gender"}.setdefault(self.gender, "Other")
def __init__(self, first=None, last=None, phone=None, office=None, gender=None, hacker=None):
if first != None:
self.first = first
self.last = last
self.phone = phone
self.office = office
self.gender = gender
self.hacker = hacker
return
men = '<NAME>'.split()
women = '<NAME> <NAME>'.split()
last = '<NAME> <NAME> <NAME> <NAME> <NAME> Underwood Phelps <NAME>'.split()
self.gender = random.choice(["M", "F", "T", "?"])
if self.gender == 'M':
self.first = random.choice(men)
elif self.gender == 'F':
self.first = random.choice(women)
else:
self.first = random.choice(random.choice([men, women]))
self.last = random.choice(last)
self.phone = "(%0.3d)%0.3d-%0.4d" % (random.randrange(0, 999), random.randrange(0, 999), random.randrange(0, 9999))
self.office = random.randrange(0,9999)
self.hacker = random.choice("yn")
class Yolodex(Actions):
addState = 0
def genInitialContacts(self):
self.contacts = []
self.contacts.append(Contact(first="Robert", last="Morris", phone="(617)253-5982", office=32972, gender='M', hacker='y'))
self.contacts.append(Contact("Chareth", "Cutestory", "(123)456-7890", 4242, 'M', 'n'))
self.contacts.append(Contact("Tim", "Berners-Lee", "(638)844-2074", 404, 'M','y'))
self.contacts.append(Contact("Pwnies", "McPwn", "(222)223-1337", 31337, 'F','y'))
self.contacts.append(Contact("Jenny", "Fakename", "(555)867-5309", 31337, 'F','n'))
def start(self):
self.read(delim='\n', expect='Welcome to the yolodex.')
self.genInitialContacts()
def add(self):
self.write("@\n")
def addBadHacker(self):
self.newContact = Contact()
self.read(delim=": ", expect="First")
self.write(self.newContact.first + "\n")
self.read(delim=": ", expect="Last")
self.write(self.newContact.last + "\n")
self.read(delim=": ", expect="Phone Number")
self.write(self.newContact.phone + "\n")
self.read(delim=": ", expect="Office Number")
self.write(str(self.newContact.office) + "\n")
self.read(delim=": ", expect="Gender")
self.write(self.newContact.gender + "\n")
self.read(delim=": ", expect="Hacker")
hacker = random.choice("abcdefghijklmopqrstuvwxz01234567890-!@#$%^&*()")
self.write(hacker + "\n")
self.read(delim="\n", expect="Please enter y or n for hacker.")
self.read(delim=": ", expect="Hacker")
self.write(self.newContact.hacker + "\n")
def addBadName(self):
self.newContact = Contact()
bad = random.choice(["first", "last"])
self.read(delim=": ", expect="First: ")
self.write((self.newContact.first if bad == "last" else self.newContact.first.lower()) + "\n")
self.read(delim=": ", expect="Last: ")
self.write((self.newContact.last.lower() if bad == "last" else self.newContact.last) + "\n")
self.addState = 3
self.finishAdd()
self.read(delim="\n", expect="Invalid %s name. Must begin with a capital letter." % bad)
self.addState = 1
self.finishAdd()
def addBadPhone(self):
self.newContact = Contact()
self.read(delim=": ", expect="First")
self.write(self.newContact.first + "\n")
self.read(delim=": ", expect="Last")
self.write(self.newContact.last + "\n")
self.read(delim=": ", expect="Phone Number")
self.write(''.join(random.choice('abcdefghiklmnopqrstuvwxyz0123456789-)') for _ in range(12)))
self.write("\n")
self.addState = 4
self.finishAdd()
def mainmenu(self):
pass
def showinvalid(self):
self.write(random.choice('abcdfghijklmorstuvwxyz0123456789-)') + "\n")
self.showGoodUser()
def finishAdd(self):
if self.addState <= 1:
self.read(delim=": ", expect="First")
self.write(self.newContact.first + "\n")
if self.addState <= 2:
self.read(delim=": ", expect="Last")
self.write(self.newContact.last + "\n")
if self.addState <= 3:
self.read(delim=": ", expect="Phone Number")
self.write(self.newContact.phone + "\n")
if self.addState <= 4:
self.read(delim=": ", expect="Office Number")
self.write(str(self.newContact.office) + "\n")
if self.addState <= 5:
self.read(delim=": ", expect="Gender")
self.write(self.newContact.gender + "\n")
if self.addState <= 6:
self.read(delim=": ", expect="Hacker")
self.write(self.newContact.hacker + "\n")
def addGood(self):
self.newContact = Contact()
self.addState = 1
self.finishAdd()
self.contacts.append(self.newContact)
def userLookup(self, first, last):
self.read(delim=": ", expect="First")
self.write(first + "\n")
self.read(delim=": ", expect="Last")
self.write(last + "\n")
def delete(self):
try:
picked = random.choice(self.contacts)
except IndexError:
return
self.write("A\n")
self.userLookup(picked.first, picked.last)
self.read(delim="\n", expect="Contact removed.")
self.contacts.remove(picked)
def edit(self):
try:
picked = random.choice(self.contacts)
except IndexError:
return
new = Contact()
self.write("B\n")
self.userLookup(picked.first, picked.last)
self.read(delim="\n", expect="Updating fields. Send just a newline to keep old data.")
self.read(delim=": ", expect="New first name")
self.write(new.first + "\n")
self.read(delim=": ", expect="New last name")
self.write(new.last + "\n")
self.read(delim=": ", expect="New phone number")
self.write(new.phone + "\n")
self.read(delim=": ", expect="New office number")
self.write(str(new.office) + "\n")
self.read(delim=": ", expect="New gender")
self.write(new.gender + "\n")
self.read(delim="? ", expect="Is the user a hacker")
self.write(new.hacker + "\n")
self.contacts[self.contacts.index(picked)] = new
def badcommand(self):
self.write(random.choice("qwertyuiopasdfghjklzxcvbnm,.[];'/1234567890!#$%^&*()-=_+") + "\n")
self.read(delim="\n", expect="Unrecognized Command.")
def showmenu(self):
try:
self.picked = random.choice(self.contacts)
except IndexError:
self.picked = None
return
self.write("C\n")
def showprev(self):
if not getattr(self, "picked", None):
return
self.write("p\n")
if self.contacts.index(self.picked) == 0:
self.read(delim="\n", expect="No previous contact.")
else:
self.picked = self.contacts[self.contacts.index(self.picked)-1]
self.showGoodUser()
def shownext(self):
if not getattr(self, "picked", None):
return
self.write("n\n")
if self.contacts.index(self.picked) == len(self.contacts)-1:
self.read(delim="\n", expect="No next contact.")
else:
self.picked = self.contacts[self.contacts.index(self.picked)+1]
self.showGoodUser()
def showexit(self):
if len(self.contacts) == 0:
return
self.write("q\n")
def showBadUser(self):
if not getattr(self, "picked", None):
return
baduser = Contact()
self.userLookup(baduser.first, baduser.last)
self.read(delim="\n", expect="No such contact.")
def showGoodUser(self):
if not getattr(self, "picked", None):
return
picked = self.picked
self.picked = None
self.userLookup(picked.first, picked.last)
self.read(delim="\n", expect="*"*40)
self.read(delim="\t", expect="First name:")
self.read(delim="\n", expect=picked.first)
self.read(delim="\t", expect="Last name:")
self.read(delim="\n", expect=picked.last)
self.read(delim="\t", expect="Phone num:")
self.read(delim="\n", expect=picked.phone)
self.read(delim="\t", expect="Office Number:")
self.read(delim="\n", expect=str(picked.office))
self.read(delim="\t\t", expect="Gender:")
self.read(delim="\n", expect=picked.genderToStr())
if picked.hacker == 'y':
self.read(delim="\n", expect="[\x1b[31m!\x1b[0m]This user known to be a hacker[\x1b[31m!\x1b[0m]")
self.read(delim="\n", expect="*"*40)
def showdelete(self):
if not getattr(self, "picked", None):
return
index = self.contacts.index(self.picked)
if index == 0 and len(self.contacts) == 0:
self.picked = None
return
self.write("d\n")
if index == len(self.contacts)-1:
self.picked = self.contacts[index-2]
else:
self.picked = self.contacts[index]
self.showGoodUser()
def showlist(self):
self.write("D\n")
for c in self.contacts:
self.read(delim="\n", expect="="*40)
self.read(delim="\t", expect="First Name:")
self.read(delim="\n", expect=c.first)
self.read(delim="\t", expect="Last Name:")
self.read(delim="\n", expect=c.last)
self.read(delim="\t", expect="Phone Number:")
self.read(delim="\n", expect=c.phone)
if len(self.contacts) == 0:
self.read(delim="\n", expect="Empty yolodex.")
self.read(delim="\n", expect="="*40)
def sortContacts(self, a, b):
return cmp(a.last, b.last)
def sort(self):
self.contacts = sorted(self.contacts, self.sortContacts)
self.write("E\n")
def exit(self):
self.write("F\n")
self.read(delim="\n", expect="Thank you for using the yolodex.")
| 3,690 |
348 | {"nom":"Tournefeuille","circ":"6ème circonscription","dpt":"Haute-Garonne","inscrits":20884,"abs":9153,"votants":11731,"blancs":95,"nuls":59,"exp":11577,"res":[{"nuance":"REM","nom":"Mme <NAME>","voix":5006},{"nuance":"FI","nom":"M. <NAME>","voix":1531},{"nuance":"LR","nom":"<NAME>","voix":1401},{"nuance":"SOC","nom":"Mme <NAME>","voix":1357},{"nuance":"FN","nom":"Mme <NAME>","voix":944},{"nuance":"ECO","nom":"M. <NAME>","voix":546},{"nuance":"COM","nom":"<NAME>","voix":165},{"nuance":"DIV","nom":"M. <NAME>","voix":133},{"nuance":"DLF","nom":"M. <NAME>","voix":109},{"nuance":"DIV","nom":"M. <NAME>","voix":79},{"nuance":"DIV","nom":"Mme <NAME>","voix":78},{"nuance":"DIV","nom":"M. <NAME>","voix":69},{"nuance":"DVG","nom":"M. <NAME>","voix":66},{"nuance":"EXD","nom":"<NAME>","voix":52},{"nuance":"EXG","nom":"Mme <NAME>","voix":41},{"nuance":"ECO","nom":"M. <NAME>","voix":0}]} | 360 |
1,875 | /*
* Copyright 2020 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright (c) 2007-present, <NAME> & <NAME>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of JSR-310 nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.threeten.bp.chrono;
import static org.threeten.bp.temporal.ChronoField.ERA;
import java.util.Locale;
import org.threeten.bp.DateTimeException;
import org.threeten.bp.format.DateTimeFormatterBuilder;
import org.threeten.bp.format.TextStyle;
import org.threeten.bp.temporal.ChronoField;
import org.threeten.bp.temporal.ChronoUnit;
import org.threeten.bp.temporal.Temporal;
import org.threeten.bp.temporal.TemporalField;
import org.threeten.bp.temporal.TemporalQueries;
import org.threeten.bp.temporal.TemporalQuery;
import org.threeten.bp.temporal.UnsupportedTemporalTypeException;
import org.threeten.bp.temporal.ValueRange;
/**
* An era in the ISO calendar system.
* <p>
* The ISO-8601 standard does not define eras.
* A definition has therefore been created with two eras - 'Current era' (CE) for
* years from 0001-01-01 (ISO) and 'Before current era' (BCE) for years before that.
* <p>
* <b>Do not use {@code ordinal()} to obtain the numeric representation of {@code IsoEra}.
* Use {@code getValue()} instead.</b>
*
* <h3>Specification for implementors</h3>
* This is an immutable and thread-safe enum.
*/
public enum IsoEra implements Era {
/**
* The singleton instance for the era BCE, 'Before Current Era'.
* The 'ISO' part of the name emphasizes that this differs from the BCE
* era in the Gregorian calendar system.
* This has the numeric value of {@code 0}.
*/
BCE,
/**
* The singleton instance for the era CE, 'Current Era'.
* The 'ISO' part of the name emphasizes that this differs from the CE
* era in the Gregorian calendar system.
* This has the numeric value of {@code 1}.
*/
CE;
//-----------------------------------------------------------------------
/**
* Obtains an instance of {@code IsoEra} from an {@code int} value.
* <p>
* {@code IsoEra} is an enum representing the ISO eras of BCE/CE.
* This factory allows the enum to be obtained from the {@code int} value.
*
* @param era the BCE/CE value to represent, from 0 (BCE) to 1 (CE)
* @return the era singleton, not null
* @throws DateTimeException if the value is invalid
*/
public static IsoEra of(int era) {
switch (era) {
case 0:
return BCE;
case 1:
return CE;
default:
throw new DateTimeException("Invalid era: " + era);
}
}
//-----------------------------------------------------------------------
/**
* Gets the numeric era {@code int} value.
* <p>
* The era BCE has the value 0, while the era CE has the value 1.
*
* @return the era value, from 0 (BCE) to 1 (CE)
*/
@Override
public int getValue() {
return ordinal();
}
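// Illustrative note (editorial, not part of the threeten-bp sources): IsoEra.of(0) returns BCE
// and IsoEra.of(1) returns CE, so IsoEra.of(IsoEra.CE.getValue()) == IsoEra.CE; any other int
// passed to of(int) throws DateTimeException, as implemented in the factory above.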
//-----------------------------------------------------------------------
@Override
public boolean isSupported(TemporalField field) {
if (field instanceof ChronoField) {
return field == ERA;
}
return field != null && field.isSupportedBy(this);
}
@Override
public ValueRange range(TemporalField field) {
if (field == ERA) {
return field.range();
} else if (field instanceof ChronoField) {
throw new UnsupportedTemporalTypeException("Unsupported field: " + field);
}
return field.rangeRefinedBy(this);
}
@Override
public int get(TemporalField field) {
if (field == ERA) {
return getValue();
}
return range(field).checkValidIntValue(getLong(field), field);
}
@Override
public long getLong(TemporalField field) {
if (field == ERA) {
return getValue();
} else if (field instanceof ChronoField) {
throw new UnsupportedTemporalTypeException("Unsupported field: " + field);
}
return field.getFrom(this);
}
//-------------------------------------------------------------------------
@Override
public Temporal adjustInto(Temporal temporal) {
return temporal.with(ERA, getValue());
}
@SuppressWarnings("unchecked")
@Override
public <R> R query(TemporalQuery<R> query) {
if (query == TemporalQueries.precision()) {
return (R) ChronoUnit.ERAS;
}
if (query == TemporalQueries.chronology() || query == TemporalQueries.zone()
|| query == TemporalQueries.zoneId() || query == TemporalQueries.offset()
|| query == TemporalQueries.localDate() || query == TemporalQueries.localTime()) {
return null;
}
return query.queryFrom(this);
}
//-----------------------------------------------------------------------
@Override
public String getDisplayName(TextStyle style, Locale locale) {
return new DateTimeFormatterBuilder().appendText(ERA, style).toFormatter(locale).format(this);
}
}
| 2,464 |
7,801 | <reponame>jiqimaogou/kotlin
/*
* Copyright 2010-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license
* that can be found in the LICENSE file.
*/
#include "CoverageMappingC.h"
#include <llvm/ProfileData/Coverage/CoverageMapping.h>
#include <llvm/ProfileData/Coverage/CoverageMappingWriter.h>
#include <llvm/IR/GlobalVariable.h>
#include <llvm/Support/raw_ostream.h>
#include <llvm/ADT/Triple.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/Type.h>
#include <llvm/IR/DerivedTypes.h>
#include <llvm/IR/Constants.h>
#include <llvm/IR/Intrinsics.h>
#include <llvm/IR/LegacyPassManager.h>
#include <llvm/Analysis/TargetLibraryInfo.h>
#include <llvm/Transforms/Instrumentation.h>
#include <llvm/Support/FileSystem.h>
#include <llvm/Support/Path.h>
#include <utility>
#include <string>
#include <vector>
#include <iostream>
#include <iomanip>
using namespace llvm;
using namespace llvm::coverage;
namespace llvm {
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(TargetLibraryInfoImpl, LLVMTargetLibraryInfoRef)
}
struct LLVMFunctionCoverage {
explicit LLVMFunctionCoverage(std::string coverageData) : coverageData(std::move(coverageData)) {}
std::string coverageData;
};
static coverage::CounterMappingRegion::RegionKind determineRegionKind(const struct LLVMCoverageRegion& region) {
switch (region.kind) {
case LLVMCoverageRegionKind::CODE:
return coverage::CounterMappingRegion::RegionKind::CodeRegion;
case LLVMCoverageRegionKind::GAP:
return coverage::CounterMappingRegion::RegionKind::GapRegion;
case LLVMCoverageRegionKind::EXPANSION:
return coverage::CounterMappingRegion::RegionKind::ExpansionRegion;
}
}
static coverage::CounterMappingRegion createCounterMappingRegion(struct LLVMCoverageRegion& region) {
auto regionKind = determineRegionKind(region);
int expandedFileId = 0;
if (regionKind == coverage::CounterMappingRegion::RegionKind::ExpansionRegion) {
expandedFileId = region.expandedFileId;
}
const Counter &counter = coverage::Counter::getCounter(region.counterId);
return coverage::CounterMappingRegion(counter, region.fileId, expandedFileId, region.lineStart,
region.columnStart, region.lineEnd, region.columnEnd, regionKind);
}
LLVMFunctionCoverage* LLVMWriteCoverageRegionMapping(unsigned int *fileIdMapping, size_t fileIdMappingSize,
struct LLVMCoverageRegion **mappingRegions, size_t mappingRegionsSize) {
std::vector<coverage::CounterMappingRegion> counterMappingRegions;
for (size_t i = 0; i < mappingRegionsSize; ++i) {
struct LLVMCoverageRegion region = *mappingRegions[i];
counterMappingRegions.emplace_back(createCounterMappingRegion(region));
}
CoverageMappingWriter writer(ArrayRef<unsigned int>(fileIdMapping, fileIdMappingSize), None, counterMappingRegions);
std::string CoverageMapping;
raw_string_ostream OS(CoverageMapping);
writer.write(OS);
OS.flush();
// Should be disposed with `LLVMFunctionCoverageDispose`.
return new LLVMFunctionCoverage(CoverageMapping);
}
void LLVMFunctionCoverageDispose(struct LLVMFunctionCoverage* functionCoverage) {
delete functionCoverage;
}
static StructType *getFunctionRecordTy(LLVMContext &Ctx) {
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) LLVMType,
Type *FunctionRecordTypes[] = {
#include "llvm/ProfileData/InstrProfData.inc"
};
StructType *FunctionRecordTy = StructType::get(Ctx, makeArrayRef(FunctionRecordTypes), true);
return FunctionRecordTy;
}
static llvm::Constant *addFunctionMappingRecord(llvm::LLVMContext &Ctx, StringRef NameValue, uint64_t FuncHash,
const std::string &CoverageMapping) {
llvm::StructType *FunctionRecordTy = getFunctionRecordTy(Ctx);
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Init,
llvm::Constant *FunctionRecordVals[] = {
#include "llvm/ProfileData/InstrProfData.inc"
};
return llvm::ConstantStruct::get(FunctionRecordTy, makeArrayRef(FunctionRecordVals));
}
// See https://github.com/llvm/llvm-project/blob/fa8fa044ec46b94e64971efa8852df0d58114062/clang/lib/CodeGen/CoverageMappingGen.cpp#L1284.
LLVMValueRef LLVMAddFunctionMappingRecord(LLVMContextRef context, const char *name, uint64_t hash,
struct LLVMFunctionCoverage *coverageMapping) {
return llvm::wrap(addFunctionMappingRecord(*llvm::unwrap(context), name, hash, coverageMapping->coverageData));
}
// See https://github.com/llvm/llvm-project/blob/fa8fa044ec46b94e64971efa8852df0d58114062/clang/lib/CodeGen/CoverageMappingGen.cpp#L1335.
// Please note that llvm/ProfileData/InstrProfData.inc refers to variable names of the function that includes it. So be careful with renaming.
static llvm::GlobalVariable* emitCoverageGlobal(
llvm::LLVMContext &Ctx,
llvm::Module &module,
std::vector<llvm::Constant *> &FunctionRecords,
const llvm::SmallVector<StringRef, 16> &FilenameRefs,
const std::string &RawCoverageMappings) {
auto *Int32Ty = llvm::Type::getInt32Ty(Ctx);
std::string FilenamesAndCoverageMappings;
llvm::raw_string_ostream outputStream(FilenamesAndCoverageMappings);
CoverageFilenamesSectionWriter(FilenameRefs).write(outputStream);
outputStream << RawCoverageMappings;
size_t CoverageMappingSize = RawCoverageMappings.size();
size_t FilenamesSize = outputStream.str().size() - CoverageMappingSize;
// See https://llvm.org/docs/CoverageMappingFormat.html#llvm-ir-representation
//
// > Coverage mapping data which is an array of bytes. Zero paddings are added at the end to force 8 byte alignment.
//
if (size_t rem = outputStream.str().size() % 8) {
CoverageMappingSize += 8 - rem;
for (size_t i = 0; i < 8 - rem; ++i) {
outputStream << '\0';
}
}
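// Worked example (editorial): if the stream held 53 bytes, rem would be 5 and 8 - rem = 3 zero
// bytes would be appended, padding the total to 56 bytes, a multiple of 8 as the format requires.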
StructType *functionRecordTy = getFunctionRecordTy(Ctx);
// Create the deferred function records array
auto functionRecordsTy = llvm::ArrayType::get(functionRecordTy, FunctionRecords.size());
auto functionRecordsVal = llvm::ConstantArray::get(functionRecordsTy, FunctionRecords);
llvm::Type *CovDataHeaderTypes[] = {
#define COVMAP_HEADER(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
};
auto CovDataHeaderTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataHeaderTypes));
llvm::Constant *CovDataHeaderVals[] = {
#define COVMAP_HEADER(Type, LLVMType, Name, Init) Init,
#include "llvm/ProfileData/InstrProfData.inc"
};
auto covDataHeaderVal = llvm::ConstantStruct::get(CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
auto *filenamesAndMappingsVal = llvm::ConstantDataArray::getString(Ctx, outputStream.str(), false);
// Create the coverage data record
llvm::Type *covDataTypes[] = {CovDataHeaderTy, functionRecordsTy, filenamesAndMappingsVal->getType()};
auto covDataTy = llvm::StructType::get(Ctx, makeArrayRef(covDataTypes));
llvm::Constant *TUDataVals[] = {covDataHeaderVal, functionRecordsVal, filenamesAndMappingsVal};
auto covDataVal = llvm::ConstantStruct::get(covDataTy, makeArrayRef(TUDataVals));
// Will be deleted when module is disposed.
return new llvm::GlobalVariable(module, covDataTy, true, llvm::GlobalValue::InternalLinkage,
covDataVal, llvm::getCoverageMappingVarName());
}
static std::string createRawCoverageMapping(struct LLVMFunctionCoverage **functionCoverages, size_t functionCoveragesSize) {
std::vector<std::string> coverageMappings;
for (size_t i = 0; i < functionCoveragesSize; ++i) {
coverageMappings.emplace_back(functionCoverages[i]->coverageData);
}
return llvm::join(coverageMappings.begin(), coverageMappings.end(), "");
}
LLVMValueRef LLVMCoverageEmit(LLVMModuleRef moduleRef,
LLVMValueRef *records, size_t recordsSize,
const char **filenames, int *filenamesIndices, size_t filenamesSize,
struct LLVMFunctionCoverage **functionCoverages, size_t functionCoveragesSize) {
LLVMContext &ctx = *unwrap(LLVMGetModuleContext(moduleRef));
Module &module = *unwrap(moduleRef);
std::vector<Constant *> functionRecords;
for (size_t i = 0; i < recordsSize; ++i) {
functionRecords.push_back(dyn_cast<Constant>(unwrap(records[i])));
}
llvm::SmallVector<StringRef, 16> filenameRefs;
filenameRefs.resize(filenamesSize);
for (size_t i = 0; i < filenamesSize; ++i) {
if (sys::path::is_absolute(filenames[i])) {
filenameRefs[filenamesIndices[i]] = filenames[i];
} else {
SmallString<256> path(filenames[i]);
sys::fs::make_absolute(path);
sys::path::remove_dots(path, true);
filenameRefs[filenamesIndices[i]] = path;
}
}
const std::string &rawCoverageMappings = createRawCoverageMapping(functionCoverages, functionCoveragesSize);
GlobalVariable *coverageGlobal = emitCoverageGlobal(ctx, module, functionRecords, filenameRefs, rawCoverageMappings);
const std::string §ion = getInstrProfSectionName(IPSK_covmap, Triple(module.getTargetTriple()).getObjectFormat());
coverageGlobal->setSection(section);
coverageGlobal->setAlignment(8);
return wrap(coverageGlobal);
}
LLVMValueRef LLVMInstrProfIncrement(LLVMModuleRef moduleRef) {
Module &module = *unwrap(moduleRef);
return wrap(Intrinsic::getDeclaration(&module, Intrinsic::instrprof_increment, None));
}
LLVMValueRef LLVMCreatePGOFunctionNameVar(LLVMValueRef llvmFunction, const char *pgoFunctionName) {
auto *fnPtr = cast<llvm::Function>(unwrap(llvmFunction));
return wrap(createPGOFuncNameVar(*fnPtr, pgoFunctionName));
}
void LLVMAddInstrProfPass(LLVMPassManagerRef passManagerRef, const char* outputFileName) {
legacy::PassManagerBase *passManager = unwrap(passManagerRef);
InstrProfOptions options;
options.InstrProfileOutput = outputFileName;
passManager->add(createInstrProfilingLegacyPass(options));
}
void LLVMKotlinAddTargetLibraryInfoWrapperPass(LLVMPassManagerRef passManagerRef, const char* targetTriple) {
legacy::PassManagerBase *passManager = unwrap(passManagerRef);
passManager->add(new TargetLibraryInfoWrapperPass(Triple(targetTriple)));
}
void LLVMKotlinInitializeTargets() {
#define INIT_LLVM_TARGET(TargetName) \
LLVMInitialize##TargetName##TargetInfo();\
LLVMInitialize##TargetName##Target();\
LLVMInitialize##TargetName##TargetMC();
#if KONAN_MACOS
INIT_LLVM_TARGET(AArch64)
INIT_LLVM_TARGET(ARM)
INIT_LLVM_TARGET(Mips)
INIT_LLVM_TARGET(X86)
INIT_LLVM_TARGET(WebAssembly)
#elif KONAN_LINUX
INIT_LLVM_TARGET(AArch64)
INIT_LLVM_TARGET(ARM)
INIT_LLVM_TARGET(Mips)
INIT_LLVM_TARGET(X86)
INIT_LLVM_TARGET(WebAssembly)
#elif KONAN_WINDOWS
INIT_LLVM_TARGET(AArch64)
INIT_LLVM_TARGET(ARM)
INIT_LLVM_TARGET(X86)
INIT_LLVM_TARGET(WebAssembly)
#endif
#undef INIT_LLVM_TARGET
} | 4,207 |
435 | <gh_stars>100-1000
/**
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the LICENSE file in
* the root directory of this source tree.
*/
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
/**
FBMemoryProfilerPresenting
 Protocol implemented by the part of FBMemoryProfiler that is responsible for the UI.
*/
@protocol FBMemoryProfilerPresenting <NSObject>
/**
Custom presentation in memory profiler's container
*/
- (void)memoryProfilerPresenter:(nonnull id)presenter
presentViewController:(nonnull UIViewController *)viewController;
- (void)memoryProfilerPresenter:(nonnull id)presenter
dismissViewController:(nonnull UIViewController *)viewController;
- (BOOL)memoryProfilerCanPresent;
@end
| 249 |
1,132 | package javarepl;
import com.googlecode.totallylazy.Pair;
import com.googlecode.totallylazy.Sequence;
import com.googlecode.totallylazy.Sequences;
import com.googlecode.totallylazy.functions.Function1;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Type;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URL;
import java.util.function.Consumer;
import java.util.jar.JarFile;
import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import static com.googlecode.totallylazy.Files.*;
import static com.googlecode.totallylazy.Randoms.takeFromValues;
import static com.googlecode.totallylazy.Sequences.*;
import static com.googlecode.totallylazy.Strings.replace;
import static com.googlecode.totallylazy.io.URLs.url;
import static com.googlecode.totallylazy.predicates.Predicates.is;
import static com.googlecode.totallylazy.predicates.Predicates.where;
import static java.lang.String.format;
import static java.lang.reflect.Modifier.isPublic;
import static java.net.URLDecoder.decode;
public class Utils {
public static Boolean javaVersionAtLeast(String version) {
return (System.getProperty("java.version").compareTo(version) >= 0);
}
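    // Illustrative: randomIdentifier("res") yields something like "res$q7k2m9x1ab3cd4ef5gh6",
    // i.e. the prefix, a "$", then 20 random characters drawn from [a-z0-9].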
public static String randomIdentifier(String prefix) {
return prefix + "$" + takeFromValues(characters("abcdefghijklmnopqrstuvwxyz1234567890")).take(20).toString("");
}
public static Type extractType(Type type) {
if (type instanceof Class<?>) {
Class<?> clazz = (Class<?>) type;
if (clazz.isAnonymousClass() || clazz.isSynthetic() || clazz.isMemberClass()) {
if (clazz.getGenericSuperclass().equals(Object.class)) {
return extractType(sequence(clazz.getGenericInterfaces())
.headOption()
.getOrElse(Object.class));
} else {
return extractType(clazz.getGenericSuperclass());
}
}
if (!isPublic(clazz.getModifiers()))
return extractType(clazz.getGenericSuperclass());
return clazz;
}
return type;
}
public static Throwable unwrapException(Throwable e) {
if (e instanceof InvocationTargetException)
return unwrapException(((InvocationTargetException) e).getTargetException());
return e;
}
public static URL resolveURL(String path) {
try {
return url(path);
} catch (Exception e) {
return url("file:" + path);
}
}
public static boolean isWebUrl(URL classpathUrl) {
return sequence("http", "https").contains(classpathUrl.getProtocol());
}
public static String applicationVersion() {
try {
File path = new File(decode(Main.class.getProtectionDomain().getCodeSource().getLocation().getPath(), "ISO-8859-1"));
if (!path.isDirectory()) {
JarInputStream jarStream = new JarInputStream(new FileInputStream(path));
Manifest manifest = jarStream.getManifest();
return manifest.getMainAttributes().getValue("Implementation-Version");
}
} catch (Exception e) {
// ignore
}
return "[unknown]";
}
public static <T> Sequence<Sequence<T>> permutations(Sequence<T> items) {
return powerSetPermutations(items).filter(where(Sequence::size, is(items.size())));
}
public static <T> Sequence<Sequence<T>> powerSetPermutations(Sequence<T> items) {
return cartesianProductPower(items, items.size()).append(Sequences.<T>empty());
}
private static <T> Sequence<Sequence<T>> cartesianProductPower(Sequence<T> items, int times) {
if (times == 0)
return items.cartesianProduct().map(Pair.functions.values()).unsafeCast();
return cartesianProductPower(items, times - 1)
.cartesianProduct(items)
.map(pair -> pair.first().append(pair.second()).unique())
.unique();
}
public static Function1<Class<?>, String> canonicalName() {
return Class::getCanonicalName;
}
public static String listValues(String name, Sequence<?> list) {
return format(name + ":\n %s\n", list.toString("\n").replaceAll("\n", "\n "));
}
public static int randomServerPort() {
try {
ServerSocket serverSocket = new ServerSocket(0);
Integer serverPort = serverSocket.getLocalPort();
serverSocket.close();
return serverPort;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static File randomOutputDirectory() {
File file = temporaryDirectory("JavaREPL/" + randomFilename());
file.deleteOnExit();
return file;
}
public static JarFile jarFile(URI path) {
try {
return new JarFile(new File(path));
} catch (IOException e) {
throw new RuntimeException("Couldn't create jar file for path " + path, e);
}
}
public static Function1<URL, String> urlAsFilePath() {
return url -> new File(url.getFile()).getPath();
}
public static Sequence<String> entries(File file) {
if (file.isDirectory()) {
return recursiveFiles(file)
.map(path().then(replace(file.getPath() + File.separator, "")));
} else {
try {
return memorise(new JarFile(new File(file.toURI())).entries())
.map(ZipEntry::getName);
} catch (Exception e) {
System.err.println("Couldn't load entries from jar " + file.toURI() + ". " + e.getLocalizedMessage());
return empty();
}
}
}
public static Consumer<Throwable> throwException() {
return throwable -> {
throw new RuntimeException(throwable);
};
}
}
| 2,530 |
3,614 | package com.central.common.resolver;
import cn.hutool.core.util.StrUtil;
import com.central.common.annotation.LoginClient;
import com.central.common.constant.SecurityConstants;
import lombok.extern.slf4j.Slf4j;
import org.springframework.core.MethodParameter;
import org.springframework.web.bind.support.WebDataBinderFactory;
import org.springframework.web.context.request.NativeWebRequest;
import org.springframework.web.method.support.HandlerMethodArgumentResolver;
import org.springframework.web.method.support.ModelAndViewContainer;
import javax.servlet.http.HttpServletRequest;
/**
 * Injects the application (client) id from the request header into parameters annotated with {@link LoginClient}.
*
* @author zlt
* @date 2019/7/10
*/
@Slf4j
public class ClientArgumentResolver implements HandlerMethodArgumentResolver {
/**
     * Input parameter filtering.
     *
     * @param methodParameter the method parameter to inspect
     * @return true if the parameter is a String annotated with {@link LoginClient}
*/
@Override
public boolean supportsParameter(MethodParameter methodParameter) {
return methodParameter.hasParameterAnnotation(LoginClient.class) && methodParameter.getParameterType().equals(String.class);
}
/**
     * @param methodParameter       the method parameter being resolved
     * @param modelAndViewContainer the model and view container
     * @param nativeWebRequest      the current web request
     * @param webDataBinderFactory  the factory used for parameter binding
     * @return the resolved client id taken from the request header
*/
@Override
public Object resolveArgument(MethodParameter methodParameter,
ModelAndViewContainer modelAndViewContainer,
NativeWebRequest nativeWebRequest,
WebDataBinderFactory webDataBinderFactory) {
HttpServletRequest request = nativeWebRequest.getNativeRequest(HttpServletRequest.class);
String clientId = request.getHeader(SecurityConstants.TENANT_HEADER);
if (StrUtil.isBlank(clientId)) {
log.warn("resolveArgument error clientId is empty");
}
return clientId;
}
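    // Minimal usage sketch (hypothetical controller, for illustration only): once this
    // resolver is registered via WebMvcConfigurer#addArgumentResolvers, a handler such as
    //
    //     @GetMapping("/client/info")
    //     public String info(@LoginClient String clientId) { return clientId; }
    //
    // receives the tenant header value directly as its clientId argument.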
}
| 782 |
956 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# scapy.contrib.description = Python-Can CANSocket
# scapy.contrib.status = loads
"""
Python-CAN CANSocket Wrapper.
"""
import time
import struct
from scapy.config import conf
from scapy.supersocket import SuperSocket
from scapy.error import warning
from scapy.layers.can import CAN
from can import BusABC as can_BusABC
from can import Message as can_Message
from can import CanError as can_Error
CAN_FRAME_SIZE = 16
CAN_INV_FILTER = 0x20000000
class CANSocket(SuperSocket):
nonblocking_socket = True
desc = "read/write packets at a given CAN interface " \
"using a python-can bus object"
def __init__(self, iface=None, timeout=1.0, basecls=CAN):
if issubclass(type(iface), can_BusABC):
self.basecls = basecls
self.iface = iface
self.ins = None
self.outs = None
self.timeout = timeout
else:
warning("Provide a python-can interface")
def recv_raw(self, x=0xffff):
msg = self.iface.recv(timeout=self.timeout)
if msg is None:
return None, None, None
hdr = msg.is_extended_id << 31 | msg.is_remote_frame << 30 | \
msg.is_error_frame << 29 | msg.arbitration_id
if conf.contribs['CAN']['swap-bytes']:
hdr = struct.unpack("<I", struct.pack(">I", hdr))[0]
dlc = msg.dlc << 24
pkt_data = struct.pack("!II", hdr, dlc) + bytes(msg.data)
return self.basecls, pkt_data, msg.timestamp
def send(self, x):
try:
msg = can_Message(is_remote_frame=x.flags == 0x2,
extended_id=x.flags == 0x4,
is_error_frame=x.flags == 0x1,
arbitration_id=x.identifier,
dlc=x.length,
data=bytes(x)[8:])
if hasattr(x, "sent_time"):
x.sent_time = time.time()
return self.iface.send(msg)
except can_Error as ex:
raise ex
@staticmethod
def select(sockets, remain=None):
"""This function is called during sendrecv() routine to select
the available sockets.
"""
if remain is not None:
max_timeout = remain / len(sockets)
for s in sockets:
if s.timeout > max_timeout:
s.timeout = max_timeout
# python-can sockets aren't selectable, so we return all of them
# sockets, None (means use the socket's recv() )
return sockets, None
@conf.commands.register
def srcan(pkt, iface=None, basecls=CAN, *args, **kargs):
s = CANSocket(iface, basecls=basecls)
a, b = s.sr(pkt, *args, **kargs)
s.close()
return a, b
| 1,373 |
453 | {
"plugins": [
["proposal-unicode-property-regex", {
"useUnicodeFlag": true
}]
]
}
| 50 |
348 | <gh_stars>100-1000
{"nom":"Guéblange-lès-Dieuze","dpt":"Moselle","inscrits":133,"abs":30,"votants":103,"blancs":14,"nuls":8,"exp":81,"res":[{"panneau":"2","voix":53},{"panneau":"1","voix":28}]} | 87 |
32,544 | package com.baeldung.boot.jsp.repository.impl;
import static org.junit.jupiter.api.Assertions.*;
import java.util.*;
import org.junit.jupiter.api.Test;
import com.baeldung.boot.jsp.repository.BookRepository;
import com.baeldung.boot.jsp.repository.model.BookData;
public class InMemoryBookRepositoryUnitTest {
@Test
    public void givenEmptyData_whenFindAll_thenReturnEmptyCollection() {
BookRepository bookRepository = new InMemoryBookRepository(Collections.emptyMap());
Collection<BookData> storedBooks = bookRepository.findAll();
assertEquals(0, storedBooks.size());
}
@Test
public void givenInitialData_whenFindAll_thenReturnInitialData() {
BookRepository bookRepository = new InMemoryBookRepository(initialBookData());
Collection<BookData> storedBooks = bookRepository.findAll();
assertEquals(3, storedBooks.size());
}
@Test
public void givenInitialData_whenFindUnavailableIsbn_thenReturnEmpty() {
BookRepository bookRepository = new InMemoryBookRepository(initialBookData());
Optional<BookData> storedBookOpt = bookRepository.findById("isbn4");
assertFalse(storedBookOpt.isPresent());
}
@Test
public void givenInitialData_whenFindAvailableIsbn_thenReturnItem() {
BookRepository bookRepository = new InMemoryBookRepository(initialBookData());
Optional<BookData> storedBookOpt = bookRepository.findById("isbn1");
assertTrue(storedBookOpt.isPresent());
}
@Test
public void givenAddedIsbn_whenFindAvailableIsbn_thenReturnItem() {
BookRepository bookRepository = new InMemoryBookRepository(Collections.emptyMap());
bookRepository.add(new BookData("isbn4", "name4", "author4"));
Optional<BookData> storedBookOpt = bookRepository.findById("isbn4");
assertTrue(storedBookOpt.isPresent());
}
private static Map<String, BookData> initialBookData() {
Map<String, BookData> initData = new HashMap<>();
initData.put("isbn1", new BookData("isbn1", "name1", "author1"));
initData.put("isbn2", new BookData("isbn2", "name2", "author2"));
initData.put("isbn3", new BookData("isbn3", "name3", "author3"));
return initData;
}
} | 834 |
543 | /*
* Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.security.ssl;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* Output stream for handshake data. This is used only internally
* to the SSL classes.
*
* MT note: one thread at a time is presumed be writing handshake
* messages, but (after initial connection setup) it's possible to
* have other threads reading/writing application data. It's the
* SSLSocketImpl class that synchronizes record writes.
*
* @author <NAME>
*/
public class HandshakeOutStream extends ByteArrayOutputStream {
OutputRecord outputRecord; // May be null if not actually used to
// output handshake message records.
HandshakeOutStream(OutputRecord outputRecord) {
super();
this.outputRecord = outputRecord;
}
// Complete a handshaking message write. Called by HandshakeMessage.
void complete() throws IOException {
if (size() < 4) { // 4: handshake message header size
// internal_error alert will be triggered
throw new RuntimeException("handshake message is not available");
}
if (outputRecord != null) {
if (!outputRecord.isClosed()) {
outputRecord.encodeHandshake(buf, 0, count);
} else {
if (SSLLogger.isOn && SSLLogger.isOn("ssl")) {
SSLLogger.warning("outbound has closed, ignore outbound " +
"handshake messages", ByteBuffer.wrap(buf, 0, count));
}
}
// reset the byte array output stream
reset();
} // otherwise, the handshake outstream is temporarily used only.
}
//
// overridden ByteArrayOutputStream methods
//
@Override
public void write(byte[] b, int off, int len) {
        // The maximum handshake message length is 2^24-1 bytes (24-bit length field).
checkOverflow(len, Record.OVERFLOW_OF_INT24);
super.write(b, off, len);
}
@Override
public void flush() throws IOException {
if (outputRecord != null) {
outputRecord.flush();
}
}
//
// handshake output stream management functions
//
/*
* Put integers encoded in standard 8, 16, 24, and 32 bit
* big endian formats. Note that OutputStream.write(int) only
* writes the least significant 8 bits and ignores the rest.
*/
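    // Illustrative example: putInt24(0x010203) writes the three bytes
    // 0x01, 0x02, 0x03 in that order (most significant byte first).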
void putInt8(int i) throws IOException {
checkOverflow(i, Record.OVERFLOW_OF_INT08);
super.write(i);
}
void putInt16(int i) throws IOException {
checkOverflow(i, Record.OVERFLOW_OF_INT16);
super.write(i >> 8);
super.write(i);
}
void putInt24(int i) throws IOException {
checkOverflow(i, Record.OVERFLOW_OF_INT24);
super.write(i >> 16);
super.write(i >> 8);
super.write(i);
}
void putInt32(int i) throws IOException {
super.write(i >> 24);
super.write(i >> 16);
super.write(i >> 8);
super.write(i);
}
/*
* Put byte arrays with length encoded as 8, 16, 24 bit
* integers in big-endian format.
*/
void putBytes8(byte[] b) throws IOException {
if (b == null) {
putInt8(0);
} else {
putInt8(b.length);
super.write(b, 0, b.length);
}
}
public void putBytes16(byte[] b) throws IOException {
if (b == null) {
putInt16(0);
} else {
putInt16(b.length);
super.write(b, 0, b.length);
}
}
void putBytes24(byte[] b) throws IOException {
if (b == null) {
putInt24(0);
} else {
putInt24(b.length);
super.write(b, 0, b.length);
}
}
/*
* Does the specified length overflow the limitation?
*/
private static void checkOverflow(int length, int limit) {
if (length >= limit) {
// internal_error alert will be triggered
throw new RuntimeException(
"Field length overflow, the field length (" +
length + ") should be less than " + limit);
}
}
}
| 2,119 |
13,184 | <filename>spm/Sources/IGListDiffKit/include/IGListIndexSetResult.h
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#import <Foundation/Foundation.h>
#import "IGListMoveIndex.h"
NS_ASSUME_NONNULL_BEGIN
/**
A result object returned when diffing with indexes.
*/
NS_SWIFT_NAME(ListIndexSetResult)
@interface IGListIndexSetResult : NSObject
/**
The indexes inserted into the new collection.
*/
@property (nonatomic, strong, readonly) NSIndexSet *inserts;
/**
The indexes deleted from the old collection.
*/
@property (nonatomic, strong, readonly) NSIndexSet *deletes;
/**
 The indexes in the old collection that need to be updated.
*/
@property (nonatomic, strong, readonly) NSIndexSet *updates;
/**
The moves from an index in the old collection to an index in the new collection.
*/
@property (nonatomic, copy, readonly) NSArray<IGListMoveIndex *> *moves;
/**
 A read-only boolean that indicates whether the result has any changes.
`YES` if the result has changes, `NO` otherwise.
*/
@property (nonatomic, assign, readonly) BOOL hasChanges;
/**
Returns the index of the object with the specified identifier *before* the diff.
@param identifier The diff identifier of the object.
@return The index of the object before the diff, or `NSNotFound`.
@see `-[IGListDiffable diffIdentifier]`.
*/
- (NSInteger)oldIndexForIdentifier:(id<NSObject>)identifier;
/**
Returns the index of the object with the specified identifier *after* the diff.
@param identifier The diff identifier of the object.
@return The index path of the object after the diff, or `NSNotFound`.
@see `-[IGListDiffable diffIdentifier]`.
*/
- (NSInteger)newIndexForIdentifier:(id<NSObject>)identifier;
/**
Creates a new result object with operations safe for use in `UITableView` and `UICollectionView` batch updates.
*/
- (IGListIndexSetResult *)resultForBatchUpdates;
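// Illustrative usage (assumed; the diff entry point lives in IGListDiff.h of the same
// project, not in this header):
//
//   IGListIndexSetResult *result = IGListDiff(oldObjects, newObjects, IGListDiffEquality);
//   if (result.hasChanges) {
//     IGListIndexSetResult *safeResult = [result resultForBatchUpdates];
//     // apply safeResult.deletes / .inserts / .moves inside a batch-update block
//   }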
/**
:nodoc:
*/
- (instancetype)init NS_UNAVAILABLE;
/**
:nodoc:
*/
+ (instancetype)new NS_UNAVAILABLE;
@end
NS_ASSUME_NONNULL_END
| 646 |
418 | <filename>src/pretalx/person/migrations/0019_auto_20180821_1346.py<gh_stars>100-1000
# Generated by Django 2.0.8 on 2018-08-21 18:46
from django.db import migrations
def fill_names(apps, schema_editor):
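    # Backfill: users without a name get their nick copied into the name field.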
User = apps.get_model("person", "User")
for user in User.objects.all():
if not user.name:
            user.name = user.nick
user.save()
class Migration(migrations.Migration):
dependencies = [
("person", "0018_auto_20180812_1523"),
]
operations = [
migrations.RunPython(code=fill_names, reverse_code=migrations.RunPython.noop)
]
| 253 |
14,668 | <reponame>zealoussnow/chromium
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/test/chromedriver/log_replay/devtools_log_reader.h"
#include "base/base_paths.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/path_service.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
// Log files to test the reader against
const char* const kTestDataPath[] = {"chrome", "test", "chromedriver",
"log_replay", "test_data"};
const char kTestGetTitlePath[] = "testGetTitle_simple.log";
const char kOneEntryPath[] = "oneDevToolsEntry.log";
const char kTruncatedJSONPath[] = "truncatedJSON.log";
const char kReadableTimestampPathLinux[] = "testReadableTimestampLinux.log";
const char kReadableTimestampPathWin[] = "testReadableTimestampWindows.log";
base::FilePath GetLogFileFromLiteral(const char literal[]) {
base::FilePath root_dir;
CHECK(base::PathService::Get(base::DIR_SOURCE_ROOT, &root_dir));
for (int i = 0; i < 5; i++)
root_dir = root_dir.AppendASCII(kTestDataPath[i]);
base::FilePath result = root_dir.AppendASCII(literal);
CHECK(base::PathExists(result));
return result;
}
} // namespace
TEST(DevToolsLogReaderTest, Basic) {
base::FilePath path = GetLogFileFromLiteral(kTestGetTitlePath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->protocol_type, LogEntry::kHTTP);
EXPECT_EQ(next->command_name, "http://localhost:38037/json/version");
next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->payload, "{\n \"string_key\": \"string_value\"\n}\n");
}
TEST(DevToolsLogReaderTest, ReadableTimeStampLinux) {
base::FilePath path = GetLogFileFromLiteral(kReadableTimestampPathLinux);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->protocol_type, LogEntry::kHTTP);
EXPECT_EQ(next->command_name, "http://localhost:38037/json/version");
next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->payload, "{\n \"string_key\": \"string_value\"\n}\n");
}
TEST(DevToolsLogReaderTest, ReadableTimeStampWindows) {
base::FilePath path = GetLogFileFromLiteral(kReadableTimestampPathWin);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->protocol_type, LogEntry::kHTTP);
EXPECT_EQ(next->command_name, "http://localhost:38037/json/version");
next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->payload, "{\n \"string_key\": \"string_value\"\n}\n");
}
TEST(DevToolsLogReaderTest, Multiple) {
base::FilePath path = GetLogFileFromLiteral(kTestGetTitlePath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next;
for (int i = 0; i < 3; i++)
next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->command_name, "http://localhost:38037/json");
next = reader.GetNext(LogEntry::kHTTP);
EXPECT_EQ(next->payload,
"[ {\n \"string_key1\": \"string_value1\"\n}, {\n "
"\"string_key2\": \"string_value2\"\n} ]\n");
}
TEST(DevToolsLogReaderTest, EndOfFile) {
base::FilePath path = GetLogFileFromLiteral(kOneEntryPath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next != nullptr);
next = reader.GetNext(LogEntry::kHTTP);
EXPECT_TRUE(next == nullptr);
}
TEST(DevToolsLogReaderTest, WebSocketBasic) {
base::FilePath path = GetLogFileFromLiteral(kTestGetTitlePath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kWebSocket);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->protocol_type, LogEntry::kWebSocket);
EXPECT_EQ(next->event_type, LogEntry::kRequest);
EXPECT_EQ(next->command_name, "Log.enable");
EXPECT_EQ(next->id, 1);
}
TEST(DevToolsLogReaderTest, WebSocketMultiple) {
base::FilePath path = GetLogFileFromLiteral(kTestGetTitlePath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kWebSocket);
next = reader.GetNext(LogEntry::kWebSocket);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->event_type, LogEntry::kRequest);
EXPECT_EQ(next->command_name, "DOM.getDocument");
EXPECT_EQ(next->id, 2);
}
TEST(DevToolsLogReaderTest, WebSocketPayload) {
base::FilePath path = GetLogFileFromLiteral(kTestGetTitlePath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next;
for (int i = 0; i < 3; i++)
next = reader.GetNext(LogEntry::kWebSocket);
EXPECT_TRUE(next != nullptr);
EXPECT_EQ(next->command_name, "Target.setAutoAttach");
EXPECT_EQ(next->id, 3);
EXPECT_EQ(
next->payload,
"{\n \"autoAttach\": true,\n \"waitForDebuggerOnStart\": false\n}\n");
}
TEST(DevToolsLogReaderTest, TruncatedJSON) {
base::FilePath path = GetLogFileFromLiteral(kTruncatedJSONPath);
DevToolsLogReader reader(path);
std::unique_ptr<LogEntry> next = reader.GetNext(LogEntry::kWebSocket);
EXPECT_TRUE(next == nullptr);
}
| 2,022 |
8,315 | package com.airbnb.epoxy;
import android.content.Context;
import android.widget.FrameLayout;
@ModelView(defaultLayout = 1)
public class TextPropModelView extends FrameLayout {
public TextPropModelView(Context context) {
super(context);
}
@TextProp
public void setTitle(CharSequence title) {
}
} | 95 |
8,437 | /*
* Tencent is pleased to support the open source community by making QMUI_Android available.
*
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the MIT License (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://opensource.org/licenses/MIT
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.qmuiteam.qmui.arch.record;
import com.qmuiteam.qmui.arch.QMUILatestVisit;
public interface LatestVisitArgumentCollector {
/**
* Called by {@link QMUILatestVisit} to collect argument value
* Notice: This is called before onResume. So It can not used to save data
* produced after fragment resumed.
* @param editor RecordArgumentEditor
*/
void onCollectLatestVisitArgument(RecordArgumentEditor editor);
}
| 326 |
434 | <reponame>marianopeck/Seaside-1
{
"category" : "Seaside-GemStone-Email",
"classinstvars" : [
],
"classvars" : [
"From",
"To" ],
"commentStamp" : "dkh 11/24/2009 11:18",
"instvars" : [
],
"name" : "WAEmailErrorHandler",
"pools" : [
],
"super" : "WAErrorHandler",
"type" : "normal" }
| 150 |
839 | <filename>testutils/src/test/java/org/apache/cxf/test/XPathAssertTest.java<gh_stars>100-1000
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.test;
import java.util.HashMap;
import java.util.Map;
import org.w3c.dom.Document;
import org.apache.cxf.staxutils.StaxUtils;
import org.junit.Test;
import static org.junit.Assert.fail;
public class XPathAssertTest {
@Test
public void testAssert() throws Exception {
Document document = StaxUtils.read(getClass().getResourceAsStream("test.xml"));
XPathAssert.assertValid("//a", document, null);
XPathAssert.assertInvalid("//aasd", document, null);
boolean f = false;
try {
XPathAssert.assertInvalid("//a", document, null);
f = true;
} catch (AssertionError e) {
// this is correct
}
if (f) {
fail("Expression is valid!");
}
try {
XPathAssert.assertValid("//aa", document, null);
f = true;
} catch (AssertionError e) {
// this is correct
}
if (f) {
fail("Expression is valid!");
}
XPathAssert.assertXPathEquals("//b", "foo", document, null);
}
@Test
public void testAssertNamespace() throws Exception {
Document document = StaxUtils.read(getClass().getResourceAsStream("test2.xml"));
Map<String, String> namespaces = new HashMap<>();
namespaces.put("a", "urn:foo");
namespaces.put("z", "urn:z");
XPathAssert.assertValid("//a:a", document, namespaces);
XPathAssert.assertValid("//z:b", document, namespaces);
}
}
| 919 |
955 | import pkg_resources
from nornir.init_nornir import InitNornir
__version__ = pkg_resources.get_distribution("nornir").version
__all__ = ("InitNornir", "__version__")
| 61 |
575 | <reponame>sarang-apps/darshan_browser
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_BROWSER_BACKGROUND_FETCH_STORAGE_GET_REGISTRATION_TASK_H_
#define CONTENT_BROWSER_BACKGROUND_FETCH_STORAGE_GET_REGISTRATION_TASK_H_
#include <memory>
#include <string>
#include "base/callback_forward.h"
#include "content/browser/background_fetch/background_fetch.pb.h"
#include "content/browser/background_fetch/background_fetch_registration_id.h"
#include "content/browser/background_fetch/storage/database_task.h"
#include "third_party/blink/public/common/service_worker/service_worker_status_code.h"
#include "url/origin.h"
namespace content {
namespace background_fetch {
// Gets an active Background Fetch metadata entry from the database.
class GetRegistrationTask : public DatabaseTask {
public:
using GetRegistrationCallback = base::OnceCallback<void(
blink::mojom::BackgroundFetchError,
BackgroundFetchRegistrationId,
blink::mojom::BackgroundFetchRegistrationDataPtr)>;
GetRegistrationTask(DatabaseTaskHost* host,
int64_t service_worker_registration_id,
const url::Origin& origin,
const std::string& developer_id,
GetRegistrationCallback callback);
~GetRegistrationTask() override;
// DatabaseTask implementation:
void Start() override;
private:
void DidGetMetadata(
blink::mojom::BackgroundFetchError error,
std::unique_ptr<proto::BackgroundFetchMetadata> metadata_proto);
void FinishWithError(blink::mojom::BackgroundFetchError error) override;
std::string HistogramName() const override;
int64_t service_worker_registration_id_;
url::Origin origin_;
std::string developer_id_;
GetRegistrationCallback callback_;
std::unique_ptr<proto::BackgroundFetchMetadata> metadata_proto_;
base::WeakPtrFactory<GetRegistrationTask> weak_factory_{
this}; // Keep as last.
DISALLOW_COPY_AND_ASSIGN(GetRegistrationTask);
};
} // namespace background_fetch
} // namespace content
#endif // CONTENT_BROWSER_BACKGROUND_FETCH_STORAGE_GET_REGISTRATION_TASK_H_
| 784 |
14,668 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <ppapi/c/pp_errors.h>
#include <ppapi/c/pp_module.h>
#include <ppapi/c/ppb.h>
#include <ppapi/c/ppp.h>
#include <ppapi/cpp/instance.h>
#include <ppapi/cpp/module.h>
#include "ppapi_simple/ps_interface.h"
#include "ppapi_simple/ps_internal.h"
class PSModule : public pp::Module {
public:
virtual pp::Instance* CreateInstance(PP_Instance instance) {
// Should not get here.
// This is only called by libppapi_cpp in Instance_DidCreate. That function
// is called by the PPP_Instance handler in libppapi_cpp, but we handle all
// plugin interfaces in ppapi_simple.
assert(0);
return NULL;
}
};
static PSModule* s_module;
namespace pp {
Module* Module::Get() {
return s_module;
}
// This shouldn't be called (it is only referenced by PPP_InitializeModule in
// ppapi_cpp, which we override), but is needed to successfully link.
Module* CreateModule() {
assert(0);
return NULL;
}
} // namespace pp
int32_t PPP_InitializeModule(PP_Module module_id,
PPB_GetInterface get_interface) {
g_ps_get_interface = get_interface;
PSInterfaceInit();
PSModule* module = new PSModule();
if (!module->InternalInit(module_id, get_interface)) {
delete s_module;
return PP_ERROR_FAILED;
}
s_module = module;
return PP_OK;
}
const void* PPP_GetInterface(const char* interface_name) {
return PSGetInterfaceImplementation(interface_name);
}
void PPP_ShutdownModule(void) {
delete s_module;
s_module = NULL;
}
| 593 |
1,652 | <filename>redis/redis-proxy-client/src/main/java/com/ctrip/framework/xpipe/redis/servlet/ProxyServletContextListener.java
package com.ctrip.framework.xpipe.redis.servlet;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.ServletRegistration;
import javax.servlet.annotation.WebListener;
@WebListener
public class ProxyServletContextListener implements ServletContextListener {
@Override
public void contextInitialized(ServletContextEvent sce) {
ServletContext context = sce.getServletContext();
try {
Class.forName("javax.servlet.ServletRegistration");
ServletRegistration.Dynamic asr = context.addServlet("ProxyServlet", ProxyServlet.class);
if (asr != null) {
asr.setLoadOnStartup(Integer.MAX_VALUE);
}
} catch (ClassNotFoundException e) {
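            // ServletRegistration (Servlet 3.0+) is not available on the classpath;
            // skip dynamic registration of the ProxyServlet.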
}
}
@Override
public void contextDestroyed(ServletContextEvent sce) {
}
}
| 396 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Saint-Michel-sur-Ternoise","circ":"1ère circonscription","dpt":"Pas-de-Calais","inscrits":732,"abs":377,"votants":355,"blancs":33,"nuls":6,"exp":316,"res":[{"nuance":"MDM","nom":"<NAME>","voix":201},{"nuance":"FN","nom":"<NAME>","voix":115}]} | 121 |
1,743 | <reponame>fordlaturnas/aws-serverless-airline-booking
{
"version": "0.2.0",
"configurations": [
{
"name": "Ingest Fn",
"type": "node",
"request": "attach",
"address": "localhost",
"port": 5858,
      // Location of the transpiled JS file; follows CodeUri
"localRoot": "${workspaceRoot}/build/ingest",
"remoteRoot": "/var/task",
"protocol": "inspector",
"stopOnEntry": false,
// Same as LocalRoot given we run on a docker container
// outFiles allows VSCode debugger to know where the source code is after finding its sourceMap
"outFiles": [
"${workspaceRoot}/build/ingest/index.js"
],
// instructs debugger to use sourceMap to identify correct breakpoint line
// and more importantly expand line/column numbers correctly as code is minified
"sourceMaps": true
},
{
"name": "Get Fn",
"type": "node",
"request": "attach",
"address": "localhost",
"port": 5858,
// Location to where the transpiled JS file is: follows CodeUri
"localRoot": "${workspaceRoot}/build/get",
"remoteRoot": "/var/task",
"protocol": "inspector",
"stopOnEntry": false,
// Same as LocalRoot given we run on a docker container
// outFiles allows VSCode debugger to know where the source code is after finding its sourceMap
"outFiles": [
"${workspaceRoot}/build/get/index.js"
],
// instructs debugger to use sourceMap to identify correct breakpoint line
// and more importantly expand line/column numbers correctly as code is minified
"sourceMaps": true
},
]
} | 879 |
2,146 | <reponame>Costallat/hunter<gh_stars>1000+
#include <glog/logging.h>
int main(int argc, char* argv[]) {
google::InitGoogleLogging(argv[0]);
FLAGS_logtostderr = 1;
int num_cookies = 42;
LOG(INFO) << "Found " << num_cookies << " cookies";
}
| 105 |
7,470 | <reponame>asklar/hermes-windows<gh_stars>1000+
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#ifndef HERMES_VM_RUNTIMEMODULE_INLINE_H
#define HERMES_VM_RUNTIMEMODULE_INLINE_H
#include "hermes/VM/Runtime.h"
namespace hermes {
namespace vm {
inline Handle<Domain> RuntimeModule::getDomain(Runtime *runtime) {
auto optionalHandle = domain_.get(runtime_, &runtime_->getHeap());
assert(optionalHandle && "RuntimeModule has an invalid Domain");
return optionalHandle.getValue();
}
inline Domain *RuntimeModule::getDomainUnsafe(Runtime *runtime) {
Domain *domain = getNoHandle(domain_, &runtime_->getHeap());
assert(domain && "RuntimeModule has an invalid Domain");
return domain;
}
inline Domain *RuntimeModule::getDomainForSamplingProfiler() {
// Do not use a read barrier here, as this is called from the SamplingProfiler
// signal handler. The signal handler may have interrupted another read/write
// barrier, which the GC isn't prepared to handle. Don't use this anywhere
// else.
OptValue<Domain *> domain = domain_.unsafeGetOptionalNoReadBarrier();
assert(domain && "RuntimeModule has an invalid Domain");
return domain.getValue();
}
} // namespace vm
} // namespace hermes
#endif
| 403 |
393 | <filename>cloud-stream-rabbitmq-consumer-8803/src/main/java/com/liuscoding/springcloud/StreamMQApplication8803.java
package com.liuscoding.springcloud;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.netflix.eureka.EnableEurekaClient;
/**
* @className: StreamMQApplication8803
* @description:
* @author: liusCoding
* @create: 2020-06-10 13:59
*/
@SpringBootApplication
@EnableEurekaClient
public class StreamMQApplication8803 {
public static void main(String[] args) {
SpringApplication.run(StreamMQApplication8803.class,args);
}
}
| 213 |
9,724 | <gh_stars>1000+
// Copyright 2015 The Emscripten Authors. All rights reserved.
// Emscripten is available under two separate licenses, the MIT license and the
// University of Illinois/NCSA Open Source License. Both these licenses can be
// found in the LICENSE file.
#include <assert.h>
#include <emscripten.h>
extern "C" {
void one(char *data, int size) {
int *x = (int*)data;
int num = size/sizeof(int);
for (int i = 0; i < num; i++) {
x[i] += 1234;
}
emscripten_sleep(1000);
emscripten_worker_respond(data, size);
}
}
| 192 |
746 | <gh_stars>100-1000
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Utility functions used by the stress test to save and load runtime info
# about queries to and from JSON files.
from collections import defaultdict
import json
import logging
import os
import sys
from tests.stress.queries import Query
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
# The version of the file format containing the collected query runtime info.
RUNTIME_INFO_FILE_VERSION = 3
def save_runtime_info(path, query, impala):
"""Updates the file at 'path' with the given query information."""
store = None
if os.path.exists(path):
with open(path) as file:
store = json.load(file)
_check_store_version(store)
if not store:
store = {
"host_names": list(), "db_names": dict(), "version": RUNTIME_INFO_FILE_VERSION}
with open(path, "w+") as file:
store["host_names"] = sorted([i.host_name for i in impala.impalads])
queries = store["db_names"].get(query.db_name, dict())
query_by_options = queries.get(query.sql, dict())
query_by_options[str(sorted(query.options.items()))] = query
queries[query.sql] = query_by_options
store["db_names"][query.db_name] = queries
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
data = dict(obj.__dict__)
# Queries are stored by sql, so remove the duplicate data. Also don't store
# profiles as JSON values, but instead separately.
for k in ("sql", "solo_runtime_profile_with_spilling",
"solo_runtime_profile_without_spilling"):
if k in data:
del data[k]
return data
json.dump(
store, file, cls=JsonEncoder, sort_keys=True, indent=2, separators=(',', ': '))
def load_runtime_info(path, impala=None):
"""Reads the query runtime information at 'path' and returns a
dict<db_name, dict<sql, Query>>. Returns an empty dict if the hosts in the 'impala'
instance do not match the data in 'path'.
"""
queries_by_db_and_sql = defaultdict(lambda: defaultdict(dict))
if not os.path.exists(path):
return queries_by_db_and_sql
with open(path) as file:
store = json.load(file)
_check_store_version(store)
if (
impala and
store.get("host_names") != sorted([i.host_name for i in impala.impalads])
):
return queries_by_db_and_sql
for db_name, queries_by_sql in store["db_names"].iteritems():
for sql, queries_by_options in queries_by_sql.iteritems():
for options, json_query in queries_by_options.iteritems():
query = Query()
query.__dict__.update(json_query)
query.sql = sql
queries_by_db_and_sql[db_name][sql][options] = query
return queries_by_db_and_sql
def _check_store_version(store):
"""Clears 'store' if the version is too old or raises an error if the version is too
new.
"""
if store["version"] < RUNTIME_INFO_FILE_VERSION:
LOG.warn("Runtime file info version is old and will be ignored")
store.clear()
elif store["version"] > RUNTIME_INFO_FILE_VERSION:
raise Exception(
"Unexpected runtime file info version %s expected %s"
% (store["version"], RUNTIME_INFO_FILE_VERSION))
def print_runtime_info_comparison(old_runtime_info, new_runtime_info):
# TODO: Provide a way to call this from the CLI. This was hard coded to run from main()
# when it was used.
print(",".join([
"Database", "Query",
"Old Mem MB w/Spilling",
"New Mem MB w/Spilling",
"Diff %",
"Old Runtime w/Spilling",
"New Runtime w/Spilling",
"Diff %",
"Old Mem MB wout/Spilling",
"New Mem MB wout/Spilling",
"Diff %",
"Old Runtime wout/Spilling",
"New Runtime wout/Spilling",
"Diff %"]))
for db_name, old_queries in old_runtime_info.iteritems():
new_queries = new_runtime_info.get(db_name)
if not new_queries:
continue
for sql, old_query in old_queries.iteritems():
new_query = new_queries.get(sql)
if not new_query:
continue
sys.stdout.write(old_query["db_name"])
sys.stdout.write(",")
sys.stdout.write(old_query["name"])
sys.stdout.write(",")
for attr in [
"required_mem_mb_with_spilling", "solo_runtime_secs_with_spilling",
"required_mem_mb_without_spilling", "solo_runtime_secs_without_spilling"
]:
old_value = old_query[attr]
sys.stdout.write(str(old_value))
sys.stdout.write(",")
new_value = new_query[attr]
sys.stdout.write(str(new_value))
sys.stdout.write(",")
if old_value and new_value is not None:
sys.stdout.write("%0.2f%%" % (100 * float(new_value - old_value) / old_value))
else:
sys.stdout.write("N/A")
sys.stdout.write(",")
print()
| 2,163 |
2,143 | package com.tngtech.archunit.testutils;
import java.lang.annotation.Annotation;
import com.tngtech.archunit.core.domain.JavaModifier;
import static java.lang.String.format;
public class ExpectedField {
public static ExpectedField.Creator of(Class<?> owner, String fieldName) {
return new ExpectedField.Creator(owner, fieldName);
}
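    // Illustrative (hypothetical class): ExpectedField.of(Person.class, "name").beingAnnotatedWith(Deprecated.class)
    // expects a message like "Field <com.example.Person.name> is annotated with @Deprecated in (Person.java:0)",
    // where the package prefix depends on where Person is declared.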
public static class Creator {
private final Class<?> clazz;
private final String fieldName;
private Creator(Class<?> clazz, String fieldName) {
this.clazz = clazz;
this.fieldName = fieldName;
}
public ExpectedMessage doesNotHaveModifier(JavaModifier modifier) {
return field("does not have modifier " + modifier);
}
public ExpectedMessage beingAnnotatedWith(Class<? extends Annotation> annotationType) {
return field("is annotated with @" + annotationType.getSimpleName());
}
private ExpectedMessage field(String message) {
String fieldDescription = format("Field <%s.%s>", clazz.getName(), fieldName);
String sourceCodeLocation = format("(%s.java:0)", clazz.getSimpleName());
return new ExpectedMessage(format("%s %s in %s", fieldDescription, message, sourceCodeLocation));
}
}
}
| 491 |
1,936 | #include "ceres-error-terms/inertial-error-term-eigen.h"
#include <iostream>
#include <imu-integrator/imu-integrator-eigen.h>
#include <maplab-common/quaternion-math.h>
#include "ceres-error-terms/parameterization/quaternion-param-eigen.h"
namespace ceres_error_terms {
template <typename Derived>
void DrawSparsityPattern(
const Eigen::MatrixBase<Derived>& matrix, const std::string& name) {
std::cout << "-------- " << name << " --------" << std::endl;
for (int i = 0; i < matrix.rows(); ++i) {
for (int j = 0; j < matrix.cols(); ++j) {
if (matrix(i, j) != 0.0) {
std::cout << " * ";
} else {
std::cout << " ";
}
}
std::cout << std::endl << std::endl;
}
std::cout << "----------------------" << std::endl;
}
void InertialErrorTermEigen::IntegrateStateAndCovariance(
const InertialStateEigen& current_state,
const Eigen::Matrix<int64_t, 1, Eigen::Dynamic>& imu_timestamps_ns,
const Eigen::Matrix<double, 6, Eigen::Dynamic>& imu_data,
InertialStateEigen* next_state, InertialStateCovariance* phi_accum,
InertialStateCovariance* Q_accum) const {
CHECK_NOTNULL(next_state);
CHECK_NOTNULL(phi_accum);
CHECK_NOTNULL(Q_accum);
Eigen::Matrix<double, 2 * imu_integrator::kImuReadingSize, 1>
debiased_imu_readings;
InertialStateCovariance phi;
InertialStateCovariance new_phi_accum;
InertialStateCovariance Q;
InertialStateCovariance new_Q_accum;
Q_accum->setZero();
phi_accum->setIdentity();
typedef Eigen::Matrix<double, imu_integrator::kStateSize, 1>
InertialStateVector;
InertialStateVector current_state_vec, next_state_vec;
current_state_vec = current_state.toVector();
for (int i = 0; i < imu_data.cols() - 1; ++i) {
CHECK_GE(imu_timestamps_ns(0, i + 1), imu_timestamps_ns(0, i))
<< "IMU measurements not properly ordered";
const Eigen::Block<InertialStateVector, imu_integrator::kGyroBiasBlockSize,
1>
current_gyro_bias =
current_state_vec.segment<imu_integrator::kGyroBiasBlockSize>(
imu_integrator::kStateGyroBiasOffset);
const Eigen::Block<InertialStateVector, imu_integrator::kAccelBiasBlockSize,
1>
current_accel_bias =
current_state_vec.segment<imu_integrator::kAccelBiasBlockSize>(
imu_integrator::kStateAccelBiasOffset);
debiased_imu_readings << imu_data.col(i).segment<3>(
imu_integrator::kAccelReadingOffset) -
current_accel_bias,
imu_data.col(i).segment<3>(imu_integrator::kGyroReadingOffset) -
current_gyro_bias,
imu_data.col(i + 1).segment<3>(imu_integrator::kAccelReadingOffset) -
current_accel_bias,
imu_data.col(i + 1).segment<3>(imu_integrator::kGyroReadingOffset) -
current_gyro_bias;
const double delta_time_seconds =
(imu_timestamps_ns(0, i + 1) - imu_timestamps_ns(0, i)) *
imu_integrator::kNanoSecondsToSeconds;
integrator_.integrate(
current_state_vec, debiased_imu_readings, delta_time_seconds,
&next_state_vec, &phi, &Q);
current_state_vec = next_state_vec;
new_Q_accum = phi * (*Q_accum) * phi.transpose() + Q;
Q_accum->swap(new_Q_accum);
new_phi_accum = phi * (*phi_accum);
phi_accum->swap(new_phi_accum);
}
*next_state = InertialStateEigen::fromVector(next_state_vec);
}
bool InertialErrorTermEigen::Evaluate(
double const* const* parameters, double* residuals_ptr,
double** jacobians) const {
// Keep Jacobians in row-major for Ceres, Eigen default is column-major.
typedef Eigen::Matrix<double, imu_integrator::kErrorStateSize,
imu_integrator::kVelocityBlockSize, Eigen::RowMajor>
VelocityJacobian;
typedef Eigen::Matrix<double, imu_integrator::kErrorStateSize,
imu_integrator::kImuBiasBlockSize, Eigen::RowMajor>
ImuBiasJacobian;
typedef Eigen::Matrix<double, imu_integrator::kErrorStateSize,
imu_integrator::kStateOrientationBlockSize,
Eigen::RowMajor>
OrientationJacobian;
typedef Eigen::Matrix<double, imu_integrator::kErrorStateSize,
imu_integrator::kPositionBlockSize, Eigen::RowMajor>
PositionJacobian;
const double* q_from_ptr = parameters[kIdxOrientationFrom];
const double* v_from_ptr = parameters[kIdxVelocityFrom];
const double* bw_from_ptr = parameters[kIdxImuBiasFrom];
const double* ba_from_ptr = parameters[kIdxImuBiasFrom] + 3;
const double* p_from_ptr = parameters[kIdxPositionFrom];
const double* q_to_ptr = parameters[kIdxOrientationTo];
const double* v_to_ptr = parameters[kIdxVelocityTo];
const double* bw_to_ptr = parameters[kIdxImuBiasTo];
const double* ba_to_ptr = parameters[kIdxImuBiasTo] + 3;
const double* p_to_ptr = parameters[kIdxPositionTo];
Eigen::Map<const Eigen::Vector4d> q_M_I_from(q_from_ptr);
Eigen::Map<const Eigen::Vector3d> b_g_from(bw_from_ptr);
Eigen::Map<const Eigen::Vector3d> I_v_I_from(v_from_ptr);
Eigen::Map<const Eigen::Vector3d> b_a_from(ba_from_ptr);
Eigen::Map<const Eigen::Vector3d> M_p_MI_from(p_from_ptr);
Eigen::Map<const Eigen::Vector4d> q_M_I_to(q_to_ptr);
Eigen::Map<const Eigen::Vector3d> b_g_to(bw_to_ptr);
Eigen::Map<const Eigen::Vector3d> I_v_I_to(v_to_ptr);
Eigen::Map<const Eigen::Vector3d> b_a_to(ba_to_ptr);
Eigen::Map<const Eigen::Vector3d> M_p_MI_to(p_to_ptr);
Eigen::Map<Eigen::Matrix<double, imu_integrator::kErrorStateSize, 1> >
residuals(residuals_ptr);
if (VLOG_IS_ON(5) && I_v_I_from.squaredNorm() == 0 &&
I_v_I_to.squaredNorm() == 0) {
VLOG(5) << "The velocity at both keyframes is zero.";
}
// Integrate the IMU measurements.
InertialStateEigen begin_state;
begin_state.q_M_I = q_M_I_from;
begin_state.b_g = b_g_from;
begin_state.I_v_I = I_v_I_from;
begin_state.b_a = b_a_from;
begin_state.M_p_MI = M_p_MI_from;
// Reuse a previous integration if the linearization point hasn't changed.
const bool cache_is_valid = integration_cache_.valid &&
(integration_cache_.begin_state == begin_state);
if (!cache_is_valid) {
integration_cache_.begin_state = begin_state;
IntegrateStateAndCovariance(
integration_cache_.begin_state, imu_timestamps_ns_, imu_data_,
&integration_cache_.end_state, &integration_cache_.phi_accum,
&integration_cache_.Q_accum);
integration_cache_.L_cholesky_Q_accum.compute(integration_cache_.Q_accum);
integration_cache_.valid = true;
}
CHECK(integration_cache_.valid);
if (imu_covariance_cached_) {
// TODO(slynen): double check frame of reference.
imu_covariance_cached_->block<3, 3>(0, 0) =
integration_cache_.Q_accum.block<3, 3>(0, 0);
imu_covariance_cached_->block<3, 3>(0, 3) =
integration_cache_.Q_accum.block<3, 3>(0, 12);
imu_covariance_cached_->block<3, 3>(3, 3) =
integration_cache_.Q_accum.block<3, 3>(12, 12);
imu_covariance_cached_->block<3, 3>(3, 0) =
integration_cache_.Q_accum.block<3, 3>(12, 0);
}
if (residuals_ptr) {
Eigen::Quaterniond quaternion_to;
quaternion_to.coeffs() = q_M_I_to;
const Eigen::Quaterniond quaternion_integrated(
integration_cache_.end_state.q_M_I);
Eigen::Vector3d orientation_error;
common::eigen_quaternion_helpers::Minus(
quaternion_to, quaternion_integrated, &orientation_error);
// Note: The residual must have the same order as the Jacobians!!!
residuals << orientation_error, b_g_to - integration_cache_.end_state.b_g,
I_v_I_to - integration_cache_.end_state.I_v_I,
b_a_to - integration_cache_.end_state.b_a,
M_p_MI_to - integration_cache_.end_state.M_p_MI;
integration_cache_.L_cholesky_Q_accum.matrixL().solveInPlace(residuals);
} else {
LOG(WARNING)
<< "Skipped residual calculation, since residual pointer was NULL";
}
if (jacobians != NULL) {
if (!cache_is_valid) {
InertialJacobianType& J_end = integration_cache_.J_end;
InertialJacobianType& J_begin = integration_cache_.J_begin;
Eigen::Quaterniond quaternion_to;
quaternion_to.coeffs() = q_M_I_to;
const Eigen::Quaterniond quaternion_integrated(
integration_cache_.end_state.q_M_I);
// These are the Jacobians lifting the orientation in the tangent space to
// the quaternions in the state.
EigenQuaternionParameterization::LiftJacobian lift_jacobian_from;
EigenQuaternionParameterization::LiftJacobian lift_jacobian_to;
// Jacobians required to properly account for the manifold structure of
// SO3. Reminder: q_residual = q_to boxminus q_integrated
// TODO(burrimi): Check if this is actually improving the convergence.
// Otherwise just set to identity as done in almost every BA.
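      // (Convention note, assumed for illustration: a boxminus on SO(3) is
      //  commonly defined as q_a [-] q_b = Log(q_b^{-1} * q_a), i.e. the error
      //  expressed in the tangent space at q_b; the exact convention used here
      //  is the one implemented by common::eigen_quaternion_helpers::Minus.)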
Eigen::Matrix3d J_boxminus_wrt_q_to;
Eigen::Matrix3d J_boxminus_wrt_q_integrated;
common::eigen_quaternion_helpers::GetBoxminusJacobians(
quaternion_to, quaternion_integrated, &J_boxminus_wrt_q_to,
&J_boxminus_wrt_q_integrated);
EigenQuaternionParameterization parameterization;
parameterization.ComputeLiftJacobian(
q_M_I_from.data(), lift_jacobian_from.data());
parameterization.ComputeLiftJacobian(
q_M_I_to.data(), lift_jacobian_to.data());
// Calculate the Jacobian for the end of the edge:
J_end.setZero();
J_end.block<3, 4>(0, 0) = J_boxminus_wrt_q_to * lift_jacobian_to;
J_end.block<12, 12>(3, 4) = Eigen::Matrix<double, 12, 12>::Identity();
      // Since Ceres separates the actual Jacobian from the Jacobian of the
      // local parameterization, we apply the inverse of the local
      // parameterization.
// Ceres can then apply the local parameterization Jacobian on top of this
// and we get the correct Jacobian in the end. This is necessary since we
// propagate the state in the tangent space of the manifold.
J_begin.setZero();
J_begin.block<3, 4>(0, 0) =
J_boxminus_wrt_q_integrated *
integration_cache_.phi_accum.block<3, 3>(0, 0) * lift_jacobian_from;
J_begin.block<3, 12>(0, 4) =
-integration_cache_.phi_accum.block<3, 12>(0, 3);
J_begin.block<12, 4>(3, 0) =
-integration_cache_.phi_accum.block<12, 3>(3, 0) * lift_jacobian_from;
J_begin.block<12, 12>(3, 4) =
-integration_cache_.phi_accum.block<12, 12>(3, 3);
// Invert and apply by using backsolve.
integration_cache_.L_cholesky_Q_accum.matrixL().solveInPlace(J_end);
integration_cache_.L_cholesky_Q_accum.matrixL().solveInPlace(J_begin);
}
const InertialJacobianType& J_end = integration_cache_.J_end;
const InertialJacobianType& J_begin = integration_cache_.J_begin;
if (jacobians[kIdxOrientationFrom] != NULL) {
Eigen::Map<OrientationJacobian> J(jacobians[kIdxOrientationFrom]);
J = J_begin.middleCols<imu_integrator::kStateOrientationBlockSize>(
imu_integrator::kStateOrientationOffset);
}
if (jacobians[kIdxPositionFrom] != NULL) {
Eigen::Map<PositionJacobian> J(jacobians[kIdxPositionFrom]);
J = J_begin.middleCols<imu_integrator::kPositionBlockSize>(
imu_integrator::kStatePositionOffset);
}
if (jacobians[kIdxVelocityFrom] != NULL) {
Eigen::Map<VelocityJacobian> J(jacobians[kIdxVelocityFrom]);
J = J_begin.middleCols<imu_integrator::kVelocityBlockSize>(
imu_integrator::kStateVelocityOffset);
}
if (jacobians[kIdxImuBiasFrom] != NULL) {
Eigen::Map<ImuBiasJacobian> J(jacobians[kIdxImuBiasFrom]);
J.leftCols<imu_integrator::kGyroBiasBlockSize>() =
J_begin.middleCols<imu_integrator::kGyroBiasBlockSize>(
imu_integrator::kStateGyroBiasOffset);
J.rightCols<imu_integrator::kAccelBiasBlockSize>() =
J_begin.middleCols<imu_integrator::kAccelBiasBlockSize>(
imu_integrator::kStateAccelBiasOffset);
}
if (jacobians[kIdxOrientationTo] != NULL) {
Eigen::Map<OrientationJacobian> J(jacobians[kIdxOrientationTo]);
J = J_end.middleCols<imu_integrator::kStateOrientationBlockSize>(
imu_integrator::kStateOrientationOffset);
}
if (jacobians[kIdxPositionTo] != NULL) {
Eigen::Map<PositionJacobian> J(jacobians[kIdxPositionTo]);
J = J_end.middleCols<imu_integrator::kPositionBlockSize>(
imu_integrator::kStatePositionOffset);
}
if (jacobians[kIdxVelocityTo] != NULL) {
Eigen::Map<VelocityJacobian> J(jacobians[kIdxVelocityTo]);
J = J_end.middleCols<imu_integrator::kVelocityBlockSize>(
imu_integrator::kStateVelocityOffset);
}
if (jacobians[kIdxImuBiasTo] != NULL) {
Eigen::Map<ImuBiasJacobian> J(jacobians[kIdxImuBiasTo]);
J.leftCols<imu_integrator::kGyroBiasBlockSize>() =
J_end.middleCols<imu_integrator::kGyroBiasBlockSize>(
imu_integrator::kStateGyroBiasOffset);
J.rightCols<imu_integrator::kAccelBiasBlockSize>() =
J_end.middleCols<imu_integrator::kAccelBiasBlockSize>(
imu_integrator::kStateAccelBiasOffset);
}
}
return true;
}
} /* namespace ceres_error_terms */
| 5,929 |
2,073 | <filename>pac4j-oauth/src/main/java/org/pac4j/scribe/builder/api/WeiboApi20.java
package org.pac4j.scribe.builder.api;
import com.github.scribejava.core.oauth2.bearersignature.BearerSignature;
import com.github.scribejava.core.oauth2.bearersignature.BearerSignatureURIQueryParameter;
import org.pac4j.scribe.extractors.WeiboJsonExtractor;
import com.github.scribejava.core.builder.api.DefaultApi20;
import com.github.scribejava.core.extractors.TokenExtractor;
import com.github.scribejava.core.model.OAuth2AccessToken;
/**
* This class represents the OAuth API implementation for Weibo using OAuth protocol
* version 2. It could be part of the Scribe library.
* <p>More info at: <a href="http://open.weibo.com/wiki/Oauth2/authorize">OAuth2.0</a></p>
*
* @author zhangzhenli
* @since 3.1.0
*/
public class WeiboApi20 extends DefaultApi20 {
public static final String BASE_URL = "https://api.weibo.com/oauth2/";
@Override
public String getAccessTokenEndpoint() {
return BASE_URL + "access_token";
}
@Override
protected String getAuthorizationBaseUrl() {
return BASE_URL + "authorize";
}
@Override
public TokenExtractor<OAuth2AccessToken> getAccessTokenExtractor() {
return WeiboJsonExtractor.instance();
}
@Override
public BearerSignature getBearerSignature() {
return BearerSignatureURIQueryParameter.instance();
}
}
| 504 |
439 | <filename>exercises/practice/queen-attack/.meta/src/reference/java/Queen.java
final class Queen {
private final int row;
private final int column;
Queen(final int row, final int column) {
this.row = row;
this.column = column;
validatePosition();
}
int getRow() {
return row;
}
int getColumn() {
return column;
}
private void validatePosition() {
validatePositionComponent(row, "row");
validatePositionComponent(column, "column");
}
private void validatePositionComponent(final int value, final String componentName) {
if (value < 0) {
throw new IllegalArgumentException("Queen position must have positive " + componentName + ".");
}
if (value > 7) {
throw new IllegalArgumentException("Queen position must have " + componentName + " <= 7.");
}
}
}
| 348 |
1,687 | public class Solution4 {
// Reference: https://zhuanlan.zhihu.com/p/189500715
/**
* @param board a 2D board containing 'X' and 'O'
* @return void
*/
public void solve(char[][] board) {
if (board == null || board.length == 0 || board[0].length == 0) {
return;
}
int n = board.length, m = board[0].length;
// Step 1: run DFS from every border 'O' toward the interior
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
if (!inAround(i, j, n, m)) {
continue;
}
if (board[i][j] == 'O') {
dfs(board, i, j);
}
}
}
// Step 2: traverse the board and update the result
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
if (board[i][j] == 'O') {
board[i][j] = 'X';
} else if (board[i][j] == '*') {
board[i][j] = 'O';
}
}
}
}
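// A small illustrative walk-through (not part of the original solution):
// for the classic board
//   X X X X        X X X X
//   X O O X   ->   X X X X
//   X X O X        X X X X
//   X O X X        X O X X
// only the bottom 'O' touches the border; the border DFS marks it '*',
// so step 2 keeps it as 'O' while the enclosed region is flipped to 'X'.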
public void dfs(char[][] board, int x, int y) {
// Make sure the current cell is inside the board
if (x < 0 || y < 0 || x >= board.length || y >= board[0].length) {
return;
}
if (board[x][y] != 'O') {
return;
}
board[x][y] = '*';
dfs(board, x + 1, y);
dfs(board, x - 1, y);
dfs(board, x, y + 1);
dfs(board, x, y - 1);
}
/**
* Whether the cell lies on the border
*
* @param x
* @param y
* @param n
* @param m
* @return
*/
public boolean inAround(int x, int y, int n, int m) {
return x == 0 || y == 0 || x == n - 1 || y == m - 1;
}
} | 1,050 |
436 | //------------------------------------------------------------------------------
//
// Name: vadd.c
//
// Purpose: Elementwise addition of two vectors (c = a + b)
//
// HISTORY: Written by <NAME>, December 2009
// Updated by <NAME> and <NAME>, October 2012
// Updated by <NAME>, July 2013
// Updated by <NAME>, October 2014
//
//------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#include <unistd.h>
#else
#include <CL/cl.h>
#endif
#include "err_code.h"
//pick up device type from compiler command line or from
//the default type
#ifndef DEVICE
#define DEVICE CL_DEVICE_TYPE_DEFAULT
#endif
extern double wtime(); // returns time since some fixed past point (wtime.c)
extern int output_device_info(cl_device_id );
//------------------------------------------------------------------------------
#define TOL (0.001) // tolerance used in floating point comparisons
#define LENGTH (1024) // length of vectors a, b, and c
//------------------------------------------------------------------------------
//
// kernel: vadd
//
// Purpose: Compute the elementwise sum c = a+b
//
// input: a and b float vectors of length count
//
// output: c float vector of length count holding the sum a + b
//
const char *KernelSource = "\n" \
"__kernel void vadd( \n" \
" __global float* a, \n" \
" __global float* b, \n" \
" __global float* c, \n" \
" const unsigned int count) \n" \
"{ \n" \
" int i = get_global_id(0); \n" \
" if(i < count) \n" \
" c[i] = a[i] + b[i]; \n" \
"} \n" \
"\n";
//------------------------------------------------------------------------------
int main(int argc, char** argv)
{
int err; // error code returned from OpenCL calls
float* h_a = (float*) calloc(LENGTH, sizeof(float)); // a vector
float* h_b = (float*) calloc(LENGTH, sizeof(float)); // b vector
float* h_c = (float*) calloc(LENGTH, sizeof(float)); // c vector (a+b) returned from the compute device
unsigned int correct; // number of correct results
size_t global; // global domain size
cl_device_id device_id; // compute device id
cl_context context; // compute context
cl_command_queue commands; // compute command queue
cl_program program; // compute program
cl_kernel ko_vadd; // compute kernel
cl_mem d_a; // device memory used for the input a vector
cl_mem d_b; // device memory used for the input b vector
cl_mem d_c; // device memory used for the output c vector
// Fill vectors a and b with random float values
int i = 0;
int count = LENGTH;
for(i = 0; i < count; i++){
h_a[i] = rand() / (float)RAND_MAX;
h_b[i] = rand() / (float)RAND_MAX;
}
// Set up platform and GPU device
cl_uint numPlatforms;
// Find number of platforms
err = clGetPlatformIDs(0, NULL, &numPlatforms);
checkError(err, "Finding platforms");
if (numPlatforms == 0)
{
printf("Found 0 platforms!\n");
return EXIT_FAILURE;
}
// Get all platforms
cl_platform_id Platform[numPlatforms];
err = clGetPlatformIDs(numPlatforms, Platform, NULL);
checkError(err, "Getting platforms");
// Secure a GPU
for (i = 0; i < numPlatforms; i++)
{
err = clGetDeviceIDs(Platform[i], DEVICE, 1, &device_id, NULL);
if (err == CL_SUCCESS)
{
break;
}
}
if (device_id == NULL)
checkError(err, "Finding a device");
err = output_device_info(device_id);
checkError(err, "Printing device output");
// Create a compute context
context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);
checkError(err, "Creating context");
// Create a command queue
commands = clCreateCommandQueue(context, device_id, 0, &err);
checkError(err, "Creating command queue");
// Create the compute program from the source buffer
program = clCreateProgramWithSource(context, 1, (const char **) & KernelSource, NULL, &err);
checkError(err, "Creating program");
// Build the program
err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
if (err != CL_SUCCESS)
{
size_t len;
char buffer[2048];
printf("Error: Failed to build program executable!\n%s\n", err_code(err));
clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, &len);
printf("%s\n", buffer);
return EXIT_FAILURE;
}
// Create the compute kernel from the program
ko_vadd = clCreateKernel(program, "vadd", &err);
checkError(err, "Creating kernel");
// Create the input (a, b) and output (c) arrays in device memory
d_a = clCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(float) * count, NULL, &err);
checkError(err, "Creating buffer d_a");
d_b = clCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(float) * count, NULL, &err);
checkError(err, "Creating buffer d_b");
d_c = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(float) * count, NULL, &err);
checkError(err, "Creating buffer d_c");
// Write a and b vectors into compute device memory
err = clEnqueueWriteBuffer(commands, d_a, CL_TRUE, 0, sizeof(float) * count, h_a, 0, NULL, NULL);
checkError(err, "Copying h_a to device at d_a");
err = clEnqueueWriteBuffer(commands, d_b, CL_TRUE, 0, sizeof(float) * count, h_b, 0, NULL, NULL);
checkError(err, "Copying h_b to device at d_b");
// Set the arguments to our compute kernel
err = clSetKernelArg(ko_vadd, 0, sizeof(cl_mem), &d_a);
err |= clSetKernelArg(ko_vadd, 1, sizeof(cl_mem), &d_b);
err |= clSetKernelArg(ko_vadd, 2, sizeof(cl_mem), &d_c);
err |= clSetKernelArg(ko_vadd, 3, sizeof(unsigned int), &count);
checkError(err, "Setting kernel arguments");
double rtime = wtime();
// Execute the kernel over the entire range of our 1d input data set
// letting the OpenCL runtime choose the work-group size
global = count;
err = clEnqueueNDRangeKernel(commands, ko_vadd, 1, NULL, &global, NULL, 0, NULL, NULL);
checkError(err, "Enqueueing kernel");
// Wait for the commands to complete before stopping the timer
err = clFinish(commands);
checkError(err, "Waiting for kernel to finish");
rtime = wtime() - rtime;
printf("\nThe kernel ran in %lf seconds\n",rtime);
// Read back the results from the compute device
err = clEnqueueReadBuffer( commands, d_c, CL_TRUE, 0, sizeof(float) * count, h_c, 0, NULL, NULL );
if (err != CL_SUCCESS)
{
printf("Error: Failed to read output array!\n%s\n", err_code(err));
exit(1);
}
// Test the results
correct = 0;
float tmp;
for(i = 0; i < count; i++)
{
tmp = h_a[i] + h_b[i]; // assign element i of a+b to tmp
tmp -= h_c[i]; // compute deviation of expected and output result
if(tmp*tmp < TOL*TOL) // correct if square deviation is less than tolerance squared
correct++;
else {
printf(" tmp %f h_a %f h_b %f h_c %f \n",tmp, h_a[i], h_b[i], h_c[i]);
}
}
// summarise results
printf("C = A+B: %d out of %d results were correct.\n", correct, count);
// cleanup then shutdown
clReleaseMemObject(d_a);
clReleaseMemObject(d_b);
clReleaseMemObject(d_c);
clReleaseProgram(program);
clReleaseKernel(ko_vadd);
clReleaseCommandQueue(commands);
clReleaseContext(context);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 3,900 |
399 | /*-
* #%L
* athena-elasticsearch
* %%
* Copyright (C) 2019 - 2020 Amazon Web Services
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.amazonaws.athena.connectors.elasticsearch;
import com.amazonaws.athena.connector.lambda.data.SchemaBuilder;
import org.apache.arrow.util.VisibleForTesting;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.FieldType;
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* This class has interfaces used for the parsing and creation of a schema based on an index mapping retrieved
* from an Elasticsearch instance. It also has an interface for converting Elasticsearch data types to Apache Arrow.
*/
class ElasticsearchSchemaUtils
{
private static final Logger logger = LoggerFactory.getLogger(ElasticsearchSchemaUtils.class);
private ElasticsearchSchemaUtils() {}
/**
* Main parsing method for the GET <index>/_mapping request.
* @param mappings is the structure that contains the metadata definitions for the index, as well as the _meta
* property used to define list fields.
* @return a Schema derived from the mapping.
*/
protected static Schema parseMapping(Map<String, Object> mappings)
{
// Used to store the _meta structure (the mapping containing the fields that should be considered a list).
Map<String, Object> meta = new HashMap<>();
SchemaBuilder builder = SchemaBuilder.newBuilder();
// Elasticsearch does not have a dedicated array type. All fields can contain zero or more elements
// so long as they are of the same type. For this reasons, users will have to add a _meta property
// to the indices they intend on using with Athena. This property is used in the building of the
// Schema to indicate which fields should be considered a LIST.
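// Illustrative example (hypothetical, not from the original source): for a
// mapping such as
//   {"_meta": {"tags": {}}, "properties": {"tags": {"type": "keyword"}}}
// the "tags" field is first inferred as VARCHAR and then, because its
// qualified name appears under _meta, wrapped into a LIST<VARCHAR> field.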
if (mappings.containsKey("_meta")) {
meta.putAll((Map) mappings.get("_meta"));
}
if (mappings.containsKey("properties")) {
Map<String, Object> fields = (Map) mappings.get("properties");
for (Map.Entry<String, Object> entry : fields.entrySet()) {
String fieldName = entry.getKey();
Map<String, Object> value = (Map) entry.getValue();
builder.addField(inferField(fieldName, fieldName, value, meta));
}
}
return builder.build();
}
/**
* Parses the response to GET index/_mapping recursively to derive the index's schema.
* @param fieldName is the name of the current field being processed (e.g. street).
* @param qualifiedName is the qualified name of the field (e.g. address.street).
* @param mapping is the current map of the element in question.
* @param meta is the map of fields that are considered to be lists.
* @return a Field object injected with the field's info.
*/
private static Field inferField(String fieldName, String qualifiedName,
Map<String, Object> mapping, Map<String, Object> meta)
{
Field field;
if (mapping.containsKey("properties")) {
// Process STRUCT.
Map<String, Object> childFields = (Map) mapping.get("properties");
List<Field> children = new ArrayList<>();
for (Map.Entry<String, Object> entry : childFields.entrySet()) {
String childField = entry.getKey();
Map<String, Object> value = (Map) entry.getValue();
children.add(inferField(childField, qualifiedName + "." + childField, value, meta));
}
field = new Field(fieldName, FieldType.nullable(Types.MinorType.STRUCT.getType()), children);
}
else {
field = new Field(fieldName, toFieldType(mapping), null);
if (meta.containsKey(qualifiedName)) {
// Process LIST.
return new Field(fieldName, FieldType.nullable(Types.MinorType.LIST.getType()),
Collections.singletonList(field));
}
}
return field;
}
/**
* Convert the data type from Elasticsearch to Arrow and injects it in a FieldType.
* @param mapping is the map containing the Elasticsearch datatype.
* @return a new FieldType corresponding to the Elasticsearch type.
*/
private static FieldType toFieldType(Map<String, Object> mapping)
{
logger.debug("toFieldType - enter: " + mapping);
String elasticType = (String) mapping.get("type");
Types.MinorType minorType;
Map<String, String> metadata = new HashMap<>();
switch (elasticType) {
case "text":
case "keyword":
case "binary":
minorType = Types.MinorType.VARCHAR;
break;
case "long":
minorType = Types.MinorType.BIGINT;
break;
case "integer":
minorType = Types.MinorType.INT;
break;
case "short":
minorType = Types.MinorType.SMALLINT;
break;
case "byte":
minorType = Types.MinorType.TINYINT;
break;
case "double":
minorType = Types.MinorType.FLOAT8;
break;
case "scaled_float":
// Store the scaling factor in the field's metadata map.
minorType = Types.MinorType.BIGINT;
metadata.put("scaling_factor", mapping.get("scaling_factor").toString());
break;
case "float":
case "half_float":
minorType = Types.MinorType.FLOAT4;
break;
case "date":
case "date_nanos":
minorType = Types.MinorType.DATEMILLI;
break;
case "boolean":
minorType = Types.MinorType.BIT;
break;
default:
minorType = Types.MinorType.NULL;
break;
}
logger.debug("Arrow Type: {}, metadata: {}", minorType.toString(), metadata);
return new FieldType(true, minorType.getType(), null, metadata);
}
/**
* Checks that two Schema objects are equal using the following criteria:
* 1) The Schemas must have the same number of fields.
* 2) The corresponding fields in the two Schema objects must also be the same irrespective of ordering within
* the Schema object using the following criteria:
* a) The fields' names must match.
* b) The fields' Arrow types must match.
* c) The fields' children lists (used for complex fields, e.g. LIST and STRUCT) must match irrespective of
* field ordering within the lists.
* d) The fields' metadata maps must match. Currently that's only applicable for scaled_float data types that
* use the field's metadata map to store the scaling factor associated with the data type.
* @param mapping1 is a mapping to be compared.
* @param mapping2 is a mapping to be compared.
* @return true if the lists are equal, false otherwise.
*/
@VisibleForTesting
protected static final boolean mappingsEqual(Schema mapping1, Schema mapping2)
{
logger.info("mappingsEqual - Enter - Mapping1: {}, Mapping2: {}", mapping1, mapping2);
// Schemas must have the same number of elements.
if (mapping1.getFields().size() != mapping2.getFields().size()) {
logger.warn("Mappings are different sizes - Mapping1: {}, Mapping2: {}",
mapping1.getFields().size(), mapping2.getFields().size());
return false;
}
// Mappings must have the same fields (irrespective of internal ordering).
for (Field field1 : mapping1.getFields()) {
Field field2 = mapping2.findField(field1.getName());
// Corresponding fields must have the same Arrow types or the Schemas are deemed not equal.
if (field2 == null || field1.getType() != field2.getType()) {
logger.warn("Fields' types do not match - Field1: {}, Field2: {}",
field1.getType(), field2 == null ? "null" : field2.getType());
return false;
}
logger.info("Field1 Name: {}, Field1 Type: {}, Field1 Metadata: {}",
field1.getName(), field1.getType(), field1.getMetadata());
logger.info("Field2 Name: {}, Field2 Type: {}, Field2 Metadata: {}",
field2.getName(), field2.getType(), field2.getMetadata());
// The corresponding fields' children and metadata maps must also match or the Schemas are deemed not equal.
if (!childrenEqual(field1.getChildren(), field2.getChildren()) ||
!field1.getMetadata().equals(field2.getMetadata())) {
return false;
}
}
return true;
}
/**
* Checks that two lists of Field objects (corresponding to the children lists of two corresponding fields in
* two different Schema objects) are the same irrespective of ordering within the lists using the following
* criteria:
* 1) The lists of Field objects must be the same size.
* 2) The corresponding fields' names must match.
* 3) The corresponding fields' Arrow types must match.
* 4) The corresponding fields' children lists (used for complex fields, e.g. LIST and STRUCT) must match
* irrespective of field ordering within the lists.
* 5) The corresponding fields' metadata maps must match. Currently that's only applicable for scaled_float
* data types that use the field's metadata map to store the scaling factor associated with the data type.
* @param list1 is a list of children fields to be compared.
* @param list2 is a list of children fields to be compared.
* @return true if the lists are equal, false otherwise.
*/
private static final boolean childrenEqual(List<Field> list1, List<Field> list2)
{
logger.info("childrenEqual - Enter - Children1: {}, Children2: {}", list1, list2);
// Children lists must have the same number of elements.
if (list1.size() != list2.size()) {
logger.warn("Children lists are different sizes - List1: {}, List2: {}", list1.size(), list2.size());
return false;
}
Map<String, Field> fields = new LinkedHashMap<>();
list2.forEach(value -> fields.put(value.getName(), value));
// lists must have the same Fields (irrespective of internal ordering).
for (Field field1 : list1) {
// Corresponding fields must have the same Arrow types or the Schemas are deemed not equal.
Field field2 = fields.get(field1.getName());
if (field2 == null || field1.getType() != field2.getType()) {
logger.warn("Fields' types do not match - Field1: {}, Field2: {}",
field1.getType(), field2 == null ? "null" : field2.getType());
return false;
}
logger.info("Field1 Name: {}, Field1 Type: {}, Field1 Metadata: {}",
field1.getName(), field1.getType(), field1.getMetadata());
logger.info("Field2 Name: {}, Field2 Type: {}, Field2 Metadata: {}",
field2.getName(), field2.getType(), field2.getMetadata());
// The corresponding fields' children and metadata maps must also match or the Schemas are deemed not equal.
if (!childrenEqual(field1.getChildren(), field2.getChildren()) ||
!field1.getMetadata().equals(field2.getMetadata())) {
return false;
}
}
return true;
}
}
| 5,021 |
5,169 | <reponame>Gantios/Specs<gh_stars>1000+
{
"name": "EasyDate",
"version": "0.11",
"summary": "Categoryes for making NSDates easy",
"license": "MIT",
"platforms": {
"ios": "9.0",
"osx": "10.10"
},
"source": {
"git": "https://github.com/BadChoice/EasyDate.git",
"tag": "0.11"
},
"dependencies": {
"DateTools": [
]
},
"description": "Working with NSDates is total pain, ugly and unmemorable. This library simplifies it all",
"homepage": "https://github.com/BadChoice/EasyDate.git",
"authors": {
"<NAME>": "<EMAIL>"
},
"source_files": [
"EasyDate/lib/**/*"
],
"requires_arc": true
}
| 268 |
348 | <gh_stars>100-1000
{"nom":"Saint-Glen","circ":"3ème circonscription","dpt":"Côtes-d'Armor","inscrits":493,"abs":220,"votants":273,"blancs":6,"nuls":7,"exp":260,"res":[{"nuance":"LR","nom":"<NAME>","voix":151},{"nuance":"REM","nom":"<NAME>","voix":109}]} | 103 |
12,366 | // Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef TINK_STREAMING_AEAD_H_
#define TINK_STREAMING_AEAD_H_
#include <memory>
#include "absl/strings/string_view.h"
#include "tink/input_stream.h"
#include "tink/output_stream.h"
#include "tink/random_access_stream.h"
#include "tink/util/statusor.h"
namespace crypto {
namespace tink {
// An interface for streaming authenticated encryption with associated data.
// Streaming encryption is typically used for encrypting large plaintexts such
// as large files. Tink may eventually contain multiple interfaces for
// streaming encryption depending on the supported properties. This interface
// supports a streaming interface for symmetric encryption with
// authentication. The underlying encryption modes are selected so that partial
// plaintext can be obtained fast by decrypting and authenticating just a part
// of the ciphertext.
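// A hedged usage sketch limited to the methods declared below (variable names
// are illustrative and not part of the original header):
//
//   std::unique_ptr<StreamingAead> saead = ...;  // obtained elsewhere
//   auto enc_stream = saead->NewEncryptingStream(
//       std::move(ciphertext_destination), associated_data);
//   auto dec_stream = saead->NewDecryptingStream(
//       std::move(ciphertext_source), associated_data);
//
// The same associated_data must be supplied to both calls, since it is
// authenticated but never stored in the ciphertext.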
class StreamingAead {
public:
// Returns a wrapper around 'ciphertext_destination', such that any bytes
// written via the wrapper are AEAD-encrypted using 'associated_data' as
// associated authenticated data. The associated data is not included in the
// ciphertext and has to be passed in as parameter for decryption.
// ByteCount() of the wrapper returns the number of written plaintext bytes.
// Closing the wrapper results in closing of the wrapped stream.
virtual crypto::tink::util::StatusOr<
std::unique_ptr<crypto::tink::OutputStream>>
NewEncryptingStream(
std::unique_ptr<crypto::tink::OutputStream> ciphertext_destination,
absl::string_view associated_data) = 0;
// Returns a wrapper around 'ciphertext_source', such that reading
// via the wrapper leads to AEAD-decryption of the underlying ciphertext,
// using 'associated_data' as associated authenticated data, and the
// read bytes are bytes of the resulting plaintext.
// ByteCount() of the wrapper returns the number of read plaintext bytes.
virtual crypto::tink::util::StatusOr<
std::unique_ptr<crypto::tink::InputStream>>
NewDecryptingStream(
std::unique_ptr<crypto::tink::InputStream> ciphertext_source,
absl::string_view associated_data) = 0;
// Returns a wrapper around 'ciphertext_source', such that reading
// via the wrapper leads to AEAD-decryption of the underlying ciphertext,
// using 'associated_data' as associated authenticated data, and the
// read bytes are bytes of the resulting plaintext.
// Note that the returned wrapper's size()-method reports size that is
// not checked for integrity. For example, if the ciphertext file has been
// truncated then size() will return a wrong result. Reading the last block
// of the plaintext will verify whether size() is correct.
// Reading through the wrapper is thread safe.
virtual crypto::tink::util::StatusOr<
std::unique_ptr<crypto::tink::RandomAccessStream>>
NewDecryptingRandomAccessStream(
std::unique_ptr<crypto::tink::RandomAccessStream> ciphertext_source,
absl::string_view associated_data) = 0;
virtual ~StreamingAead() {}
};
} // namespace tink
} // namespace crypto
#endif // TINK_STREAMING_AEAD_H_
| 1,031 |
749 | <reponame>liu12151407/OXChart
package com.openxu.cview.xmstock.bean;
import java.util.List;
/**
* author : xiami
* date : 2018/3/13 14:59
* className : ZhabanInfo
* version : 1.0
* description : zhaban (failed limit-up)
*/
public class ZhabanInfo {
private List<List<String>> zhaban_line;
private String zhaban_tag;
public List<List<String>> getZhaban_line() {
return zhaban_line;
}
public void setZhaban_line(List<List<String>> zhaban_line) {
this.zhaban_line = zhaban_line;
}
public String getZhaban_tag() {
return zhaban_tag;
}
public void setZhaban_tag(String zhaban_tag) {
this.zhaban_tag = zhaban_tag;
}
}
| 303 |
884 | /*
* Copyright 2014 - 2021 Blazebit.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.blazebit.persistence.view.impl.type;
import com.blazebit.persistence.view.spi.type.TypeConverter;
import java.lang.reflect.Method;
import java.lang.reflect.Type;
import java.sql.Time;
/**
*
* @author <NAME>
* @since 1.2.0
*/
public class TimeToLocalTimeTypeConverter implements TypeConverter<Time, Object> {
private static final Method TO_LOCAL_TIME;
private static final Method VALUE_OF;
static {
Method toLocalTime = null;
Method valueOf = null;
try {
Class<?> c = java.sql.Time.class;
Class<?> localTimeClass = Class.forName("java.time.LocalTime");
toLocalTime = c.getMethod("toLocalTime");
valueOf = c.getMethod("valueOf", localTimeClass);
} catch (Exception e) {
// Ignore
}
TO_LOCAL_TIME = toLocalTime;
VALUE_OF = valueOf;
}
@Override
public Class<?> getUnderlyingType(Class<?> owningClass, Type declaredType) {
return Time.class;
}
@Override
public Object convertToViewType(Time object) {
if (object == null) {
return null;
}
try {
return TO_LOCAL_TIME.invoke(object);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public Time convertToUnderlyingType(Object object) {
if (object == null) {
return null;
}
try {
return (Time) VALUE_OF.invoke(null, object);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
| 872 |
5,411 | /*
* This file is part of the CitizenFX project - http://citizen.re/
*
* See LICENSE and MENTIONS in the root of the source tree for information
* regarding licensing.
*/
// This file contains code adapted from the original GTA IV script hook, the
// copyright notice for which follows below.
/*****************************************************************************\
Copyright (C) 2009, Aru <<EMAIL>>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
\*****************************************************************************/
#include "StdInc.h"
#include "scrThread.h"
#include "scrEngine.h"
#include <Hooking.h>
static hook::thiscall_stub<rage::eThreadState(GtaThread*, uint32_t)> _gtaThread__Tick([]()
{
return hook::get_pattern("56 6A 18 8B F1 E8 ? ? ? ? 50 8D");
});
static hook::thiscall_stub<void(GtaThread*)> _gtaThread__Kill([]()
{
return hook::get_pattern("57 8B F9 8B 0D ? ? ? ? 57 8B 01");
});
rage::eThreadState GtaThread::Tick(uint32_t opsToExecute)
{
return _gtaThread__Tick(this, opsToExecute);
//EAXJMP(0xBBCDF0);
}
void GtaThread::Kill()
{
_gtaThread__Kill(this);
//EAXJMP(0xBBCE70);
}
rage::eThreadState GtaThread::Run(uint32_t opsToExecute)
{
// set the current thread
rage::scrThread* activeThread = rage::scrEngine::GetActiveThread();
rage::scrEngine::SetActiveThread(this);
// invoke the running thing if we're not dead
if (m_Context.State != rage::ThreadStateKilled)
{
DoRun();
}
rage::scrEngine::SetActiveThread(activeThread);
return m_Context.State;
}
rage::eThreadState GtaThread::Reset(uint32_t scriptHash, void* pArgs, uint32_t argCount)
{
m_Context.IP = 0;
m_Context.SP = 0;
m_Context.FrameSP = m_Context.SP;
m_Context.TimerA = 0;
m_Context.TimerB = 0;
m_Context.TimerC = 0;
m_Context.ExIP = 0;
m_Context.ExFrameSP = 0;
m_Context.ExSP = 0;
m_Context._f50 = 0;
m_Context.State = rage::ThreadStateIdle;
m_Context.ScriptHash = scriptHash;
// zero out gtathread bits
memset(&_f88, 0, ((uintptr_t)&m_nFlags - (uintptr_t)&_f88) + 4);
m_pszExitMessage = "Normal exit";
m_bCanBePaused = true;
m_paused = false;
return m_Context.State;
}
| 953 |
1,136 | <filename>external/DCNv2/testcpu.py
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from dcn_v2 import dcn_v2_conv, DCNv2, DCN
from dcn_v2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling
deformable_groups = 1
N, inC, inH, inW = 2, 2, 4, 4
outC = 2
kH, kW = 3, 3
def conv_identify(weight, bias):
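# Zero the bias and set only the centre tap of each matching in/out channel
# pair to 1, so the convolution acts as an identity map on the input.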
weight.data.zero_()
bias.data.zero_()
o, i, h, w = weight.shape
y = h//2
x = w//2
for p in range(i):
for q in range(o):
if p == q:
weight.data[q, p, y, x] = 1.0
def check_zero_offset():
conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True)
conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True)
dcn_v2 = DCNv2(inC, outC, (kH, kW),
stride=1, padding=1, dilation=1,
deformable_groups=deformable_groups)
conv_offset.weight.data.zero_()
conv_offset.bias.data.zero_()
conv_mask.weight.data.zero_()
conv_mask.bias.data.zero_()
conv_identify(dcn_v2.weight, dcn_v2.bias)
input = torch.randn(N, inC, inH, inW)
offset = conv_offset(input)
mask = conv_mask(input)
mask = torch.sigmoid(mask)
output = dcn_v2(input, offset, mask)
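# With conv_mask zeroed, mask = sigmoid(0) = 0.5 everywhere, so the identity
# convolution yields 0.5 * input; doubling the output should recover the input.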
output *= 2
d = (input - output).abs().max()
if d < 1e-10:
print('Zero offset passed')
else:
print('Zero offset failed')
print(input)
print(output)
def check_gradient_dconv():
input = torch.rand(N, inC, inH, inW) * 0.01
input.requires_grad = True
offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW) * 2
# offset.data.zero_()
# offset.data -= 0.5
offset.requires_grad = True
mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW)
# mask.data.zero_()
mask.requires_grad = True
mask = torch.sigmoid(mask)
weight = torch.randn(outC, inC, kH, kW)
weight.requires_grad = True
bias = torch.rand(outC)
bias.requires_grad = True
stride = 1
padding = 1
dilation = 1
print('check_gradient_dconv: ',
gradcheck(dcn_v2_conv, (input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups),
eps=1e-3, atol=1e-4, rtol=1e-2))
def check_pooling_zero_offset():
input = torch.randn(2, 16, 64, 64).zero_()
input[0, :, 16:26, 16:26] = 1.
input[1, :, 10:20, 20:30] = 2.
rois = torch.tensor([
[0, 65, 65, 103, 103],
[1, 81, 41, 119, 79],
]).float()
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=True,
group_size=1,
trans_std=0.0)
out = pooling(input, rois, input.new())
s = ', '.join(['%f' % out[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=False,
group_size=1,
trans_std=0.0)
offset = torch.randn(20, 2, 7, 7).zero_()
dout = dpooling(input, rois, offset)
s = ', '.join(['%f' % dout[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
def check_gradient_dpooling():
input = torch.randn(2, 3, 5, 5) * 0.01
N = 4
batch_inds = torch.randint(2, (N, 1)).float()
x = torch.rand((N, 1)).float() * 15
y = torch.rand((N, 1)).float() * 15
w = torch.rand((N, 1)).float() * 10
h = torch.rand((N, 1)).float() * 10
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(N, 2, 3, 3)
input.requires_grad = True
offset.requires_grad = True
spatial_scale = 1.0 / 4
pooled_size = 3
output_dim = 3
no_trans = 0
group_size = 1
trans_std = 0.0
sample_per_part = 4
part_size = pooled_size
print('check_gradient_dpooling:',
gradcheck(dcn_v2_pooling, (input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size,
part_size,
sample_per_part,
trans_std),
eps=1e-4))
def example_dconv():
input = torch.randn(2, 64, 128, 128)
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3, 3), stride=1,
padding=1, deformable_groups=2)
# print(dcn.weight.shape, input.shape)
output = dcn(input)
targert = output.new(*output.size())
targert.data.uniform_(-0.01, 0.01)
error = (targert - output).mean()
error.backward()
print(output.shape)
def example_dpooling():
input = torch.randn(2, 32, 64, 64)
batch_inds = torch.randint(2, (20, 1)).float()
x = torch.randint(256, (20, 1)).float()
y = torch.randint(256, (20, 1)).float()
w = torch.randint(64, (20, 1)).float()
h = torch.randint(64, (20, 1)).float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(20, 2, 7, 7)
input.requires_grad = True
offset.requires_grad = True
# normal roi_align
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=True,
group_size=1,
trans_std=0.1)
# deformable pooling
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1)
out = pooling(input, rois, offset)
dout = dpooling(input, rois, offset)
print(out.shape)
print(dout.shape)
target_out = out.new(*out.size())
target_out.data.uniform_(-0.01, 0.01)
target_dout = dout.new(*dout.size())
target_dout.data.uniform_(-0.01, 0.01)
e = (target_out - out).mean()
e.backward()
e = (target_dout - dout).mean()
e.backward()
def example_mdpooling():
input = torch.randn(2, 32, 64, 64)
input.requires_grad = True
batch_inds = torch.randint(2, (20, 1)).float()
x = torch.randint(256, (20, 1)).float()
y = torch.randint(256, (20, 1)).float()
w = torch.randint(64, (20, 1)).float()
h = torch.randint(64, (20, 1)).float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
# mdformable pooling (V2)
dpooling = DCNPooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1,
deform_fc_dim=1024)
dout = dpooling(input, rois)
target = dout.new(*dout.size())
target.data.uniform_(-0.1, 0.1)
error = (target - dout).mean()
error.backward()
print(dout.shape)
if __name__ == '__main__':
example_dconv()
example_dpooling()
example_mdpooling()
check_pooling_zero_offset()
# zero offset check
if inC == outC:
check_zero_offset()
check_gradient_dpooling()
check_gradient_dconv()
# """
# ****** Note: backward is not reentrant error may not be a serious problem,
# ****** since the max error is less than 1e-7,
# ****** Still looking for what trigger this problem
# """
| 4,456 |
337 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eventmesh.common.loadbalance;
import java.util.concurrent.atomic.AtomicInteger;
public class Weight<T> {
private T target;
private final int weight;
private final AtomicInteger currentWeight;
public Weight(T target, int weight) {
this.target = target;
this.weight = weight;
this.currentWeight = new AtomicInteger(0);
}
public void decreaseTotal(int total) {
currentWeight.addAndGet(-1 * total);
}
public void increaseCurrentWeight() {
currentWeight.addAndGet(weight);
}
public T getTarget() {
return target;
}
public void setTarget(T target) {
this.target = target;
}
public int getWeight() {
return weight;
}
public AtomicInteger getCurrentWeight() {
return currentWeight;
}
@Override
public String toString() {
return "Wight{"
+ "target=" + target
+ ", weight=" + weight
+ ", currentWeight=" + currentWeight
+ '}';
}
}
| 636 |
622 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Target;
/**
* Annotation used for marking methods and fields that are called by reflection. Useful for keeping
* components that would otherwise be removed by Proguard. Use the value parameter to mention a file
* that calls this method.
*/
@Target({ElementType.METHOD, ElementType.FIELD, ElementType.TYPE, ElementType.CONSTRUCTOR})
public @interface UsedByReflection {
String value();
}
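// Hypothetical usage sketch (the file name and method below are illustrative
// only and not part of the original source):
//
//   @UsedByReflection("nativeinterpreterwrapper_jni.cc")
//   static Object createFromNative(long handle) { ... }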
| 285 |
1,292 | /**
*
*/
package org.sikuli.guide;
import java.awt.Color;
import java.awt.Point;
import java.awt.Rectangle;
public class Connector extends SikuliGuideArrow {
public Connector(SikuliGuideComponent source, SikuliGuideComponent destination){
super(source, destination);
// setForeground(Color.white);
// sourceComponent = source;
// destinationComponent = destination;
}
@Override
protected void updateBounds() {
super.updateBounds();
Point src = getSource();
Point dest = getDestination();
int dx = src.x - dest.x;
int dy = src.y - dest.y;
if (Math.abs(dx) < Math.abs(dy))
setStyle(ELBOW_X);
else
setStyle(ELBOW_Y);
}
} | 335 |
1,023 | <filename>src/plugins/console/server/lib/spec_definitions/json/generated/cluster.remote_info.json
{
"cluster.remote_info": {
"methods": [
"GET"
],
"patterns": [
"_remote/info"
],
"documentation": "https://opensearch.org/docs/latest/opensearch/rest-api/remote-info/"
}
}
| 130 |
674 | <filename>libs/validators/__init__.py
from .between import between # noqa
from .domain import domain # noqa
from .email import email # noqa
from .extremes import Max, Min # noqa
from .i18n import fi_business_id, fi_ssn # noqa
from .iban import iban # noqa
from .ip_address import ipv4, ipv6 # noqa
from .length import length # noqa
from .mac_address import mac_address # noqa
from .slug import slug # noqa
from .truthy import truthy # noqa
from .url import url # noqa
from .utils import ValidationFailure, validator # noqa
from .uuid import uuid # noqa
__version__ = '0.10'
| 204 |
851 | <gh_stars>100-1000
/*
* Copyright 2004 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef P2P_BASE_RELAYSERVER_H_
#define P2P_BASE_RELAYSERVER_H_
#include <map>
#include <string>
#include <vector>
#include "p2p/base/port.h"
#include "p2p/base/stun.h"
#include "rtc_base/asyncudpsocket.h"
#include "rtc_base/random.h"
#include "rtc_base/socketaddresspair.h"
#include "rtc_base/thread.h"
#include "rtc_base/timeutils.h"
namespace cricket {
class RelayServerBinding;
class RelayServerConnection;
// Relays traffic between connections to the server that are "bound" together.
// All connections created with the same username/password are bound together.
class RelayServer : public rtc::MessageHandler, public sigslot::has_slots<> {
public:
// Creates a server, which will use this thread to post messages to itself.
explicit RelayServer(rtc::Thread* thread);
~RelayServer() override;
rtc::Thread* thread() { return thread_; }
// Indicates whether we will print updates of the number of bindings.
bool log_bindings() const { return log_bindings_; }
void set_log_bindings(bool log_bindings) { log_bindings_ = log_bindings; }
// Updates the set of sockets that the server uses to talk to "internal"
// clients. These are clients that do the "port allocations".
void AddInternalSocket(rtc::AsyncPacketSocket* socket);
void RemoveInternalSocket(rtc::AsyncPacketSocket* socket);
// Updates the set of sockets that the server uses to talk to "external"
// clients. These are the clients that do not do allocations. They do not
// know that these addresses represent a relay server.
void AddExternalSocket(rtc::AsyncPacketSocket* socket);
void RemoveExternalSocket(rtc::AsyncPacketSocket* socket);
// Starts listening for connections on this sockets. When someone
// tries to connect, the connection will be accepted and a new
// internal socket will be added.
void AddInternalServerSocket(rtc::AsyncSocket* socket,
cricket::ProtocolType proto);
// Removes this server socket from the list.
void RemoveInternalServerSocket(rtc::AsyncSocket* socket);
// Methods for testing and debugging.
int GetConnectionCount() const;
rtc::SocketAddressPair GetConnection(int connection) const;
bool HasConnection(const rtc::SocketAddress& address) const;
private:
rtc::Thread* thread_;
webrtc::Random random_;
bool log_bindings_;
std::vector<rtc::AsyncPacketSocket*> internal_sockets_;
std::vector<rtc::AsyncPacketSocket*> external_sockets_;
std::vector<rtc::AsyncPacketSocket*> removed_sockets_;
std::map<rtc::AsyncSocket*, cricket::ProtocolType> server_sockets_;
std::map<std::string, RelayServerBinding*> bindings_;
std::map<rtc::SocketAddressPair, RelayServerConnection*> connections_;
// Called when a packet is received by the server on one of its sockets.
void OnInternalPacket(rtc::AsyncPacketSocket* socket,
const char* bytes,
size_t size,
const rtc::SocketAddress& remote_addr,
const rtc::PacketTime& packet_time);
void OnExternalPacket(rtc::AsyncPacketSocket* socket,
const char* bytes,
size_t size,
const rtc::SocketAddress& remote_addr,
const rtc::PacketTime& packet_time);
void OnReadEvent(rtc::AsyncSocket* socket);
// Processes the relevant STUN request types from the client.
bool HandleStun(const char* bytes,
size_t size,
const rtc::SocketAddress& remote_addr,
rtc::AsyncPacketSocket* socket,
std::string* username,
StunMessage* msg);
void HandleStunAllocate(const char* bytes,
size_t size,
const rtc::SocketAddressPair& ap,
rtc::AsyncPacketSocket* socket);
void HandleStun(RelayServerConnection* int_conn,
const char* bytes,
size_t size);
void HandleStunAllocate(RelayServerConnection* int_conn,
const StunMessage& msg);
void HandleStunSend(RelayServerConnection* int_conn, const StunMessage& msg);
// Adds/Removes the a connection or binding.
void AddConnection(RelayServerConnection* conn);
void RemoveConnection(RelayServerConnection* conn);
void RemoveBinding(RelayServerBinding* binding);
// Handle messages in our thread.
void OnMessage(rtc::Message* pmsg) override;
// Called when the timer for checking lifetime times out.
void OnTimeout(RelayServerBinding* binding);
// Accept connections on this server socket.
void AcceptConnection(rtc::AsyncSocket* server_socket);
friend class RelayServerConnection;
friend class RelayServerBinding;
};
// Maintains information about a connection to the server. Each connection is
// part of one and only one binding.
class RelayServerConnection {
public:
RelayServerConnection(RelayServerBinding* binding,
const rtc::SocketAddressPair& addrs,
rtc::AsyncPacketSocket* socket);
~RelayServerConnection();
RelayServerBinding* binding() { return binding_; }
rtc::AsyncPacketSocket* socket() { return socket_; }
// Returns a pair where the source is the remote address and the destination
// is the local address.
const rtc::SocketAddressPair& addr_pair() { return addr_pair_; }
// Sends a packet to the connected client. If an address is provided, then
// we make sure the internal client receives it, wrapping if necessary.
void Send(const char* data, size_t size);
void Send(const char* data, size_t size, const rtc::SocketAddress& ext_addr);
// Sends a STUN message to the connected client with no wrapping.
void SendStun(const StunMessage& msg);
void SendStunError(const StunMessage& request, int code, const char* desc);
// A locked connection is one for which we know the intended destination of
// any raw packet received.
bool locked() const { return locked_; }
void Lock();
void Unlock();
// Records the address that raw packets should be forwarded to (for internal
// packets only; for external, we already know where they go).
const rtc::SocketAddress& default_destination() const {
return default_dest_;
}
void set_default_destination(const rtc::SocketAddress& addr) {
default_dest_ = addr;
}
private:
RelayServerBinding* binding_;
rtc::SocketAddressPair addr_pair_;
rtc::AsyncPacketSocket* socket_;
bool locked_;
rtc::SocketAddress default_dest_;
};
// Records a set of internal and external connections that we relay between,
// or in other words, that are "bound" together.
class RelayServerBinding : public rtc::MessageHandler {
public:
RelayServerBinding(RelayServer* server,
const std::string& username,
const std::string& password,
int lifetime);
~RelayServerBinding() override;
RelayServer* server() { return server_; }
int lifetime() { return lifetime_; }
const std::string& username() { return username_; }
const std::string& password() { return password_; }
const std::string& magic_cookie() { return magic_cookie_; }
// Adds/Removes a connection into the binding.
void AddInternalConnection(RelayServerConnection* conn);
void AddExternalConnection(RelayServerConnection* conn);
// We keep track of the use of each binding. If we detect that it was not
// used for longer than the lifetime, then we send a signal.
void NoteUsed();
sigslot::signal1<RelayServerBinding*> SignalTimeout;
// Determines whether the given packet has the magic cookie present (in the
// right place).
bool HasMagicCookie(const char* bytes, size_t size) const;
// Determines the connection to use to send packets to or from the given
// external address.
RelayServerConnection* GetInternalConnection(
const rtc::SocketAddress& ext_addr);
RelayServerConnection* GetExternalConnection(
const rtc::SocketAddress& ext_addr);
// MessageHandler:
void OnMessage(rtc::Message* pmsg) override;
private:
RelayServer* server_;
std::string username_;
std::string password_;
std::string magic_cookie_;
std::vector<RelayServerConnection*> internal_connections_;
std::vector<RelayServerConnection*> external_connections_;
int lifetime_;
int64_t last_used_;
// TODO(?): bandwidth
};
} // namespace cricket
#endif // P2P_BASE_RELAYSERVER_H_
| 2,994 |
965 | <gh_stars>100-1000
BEGIN_COM_MAP(CBeeper)
COM_INTERFACE_ENTRY(IBeeper)
COM_INTERFACE_ENTRY(IDispatch)
COM_INTERFACE_ENTRY_TEAR_OFF(IID_ISupportErrorInfo, CBeeper2)
END_COM_MAP() | 84 |
1,775 | _base_ = ['./hrnet_w32_coco_256x192.py']
# fp16 settings
fp16 = dict(loss_scale='dynamic')
| 43 |
357 | from .. import Circuit
from .. import gate as g
def flatten(c: Circuit) -> Circuit:
"""expands slice and multiple targets into single target"""
n_qubits = c.n_qubits
ops = []
for op in c.ops:
if isinstance(op, (g.OneQubitGate, g.Reset)):
ops += [op.create(t, op.params, None) for t in op.target_iter(n_qubits)]
elif isinstance(op, g.TwoQubitGate):
ops += [
op.create(t, op.params, None)
for t in op.control_target_iter(n_qubits)
]
elif isinstance(op, g.Measurement):
if op.key is None:
ops += [
op.create(t, op.params, None) for t in op.target_iter(n_qubits)
]
else:
options = {'key': op.key}
if op.duplicated is not None:
options['duplicated'] = op.duplicated
ops += [
op.create(tuple(t for t in op.target_iter(n_qubits)),
op.params, options)
]
else:
raise ValueError(f"Cannot process operation {op.lowername}.")
return Circuit(n_qubits, ops)
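# A hedged usage sketch (assuming the package-level Circuit builder with slice
# targets, e.g. Circuit(2).h[:].cx[0, 1]; names here are illustrative only):
#
#     c = Circuit(2).h[:].cx[0, 1]
#     flat = flatten(c)
#     # flat now holds h[0], h[1], cx[0, 1] as individual single-target ops.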
| 640 |
345 | #pragma once
#include <string>
#include <memory>
#include <vector>
#include <nlohmann/json.hpp>
#include "Sqex.h"
#include "Sqex_Eqdp.h"
#include "Sqex_EqpGmp.h"
#include "Sqex_Est.h"
#include "Sqex_Imc.h"
namespace Sqex::ThirdParty::TexTools {
struct ModPackEntry {
std::string Name;
std::string Author;
std::string Version;
std::string Url;
};
void to_json(nlohmann::json&, const ModPackEntry&);
void from_json(const nlohmann::json&, ModPackEntry&);
struct ModEntry {
std::string Name;
std::string Category;
std::string FullPath;
uint64_t ModOffset{};
uint64_t ModSize{};
std::string DatFile;
bool IsDefault{};
std::optional<ModPackEntry> ModPack;
std::string ToExpacDatPath() const;
bool IsMetadata() const;
};
void to_json(nlohmann::json&, const ModEntry&);
void from_json(const nlohmann::json&, ModEntry&);
namespace ModPackPage {
struct Option {
std::string Name;
std::string Description;
std::string ImagePath;
std::vector<ModEntry> ModsJsons;
std::string GroupName;
std::string SelectionType;
bool IsChecked;
};
void to_json(nlohmann::json&, const Option&);
void from_json(const nlohmann::json&, Option&);
struct ModGroup {
std::string GroupName;
std::string SelectionType;
std::vector<Option> OptionList;
};
void to_json(nlohmann::json&, const ModGroup&);
void from_json(const nlohmann::json&, ModGroup&);
struct Page {
int PageIndex{};
std::vector<ModGroup> ModGroups;
};
void to_json(nlohmann::json&, const Page&);
void from_json(const nlohmann::json&, Page&);
}
struct TTMPL {
std::string MinimumFrameworkVersion;
std::string FormatVersion;
std::string Name;
std::string Author;
std::string Version;
std::string Description;
std::string Url;
std::vector<ModPackPage::Page> ModPackPages;
std::vector<ModEntry> SimpleModsList;
static TTMPL FromStream(const RandomAccessStream& stream);
enum TraverseCallbackResult {
Continue,
Break,
};
void ForEachEntry(std::function<void(Sqex::ThirdParty::TexTools::ModEntry&)> cb);
void ForEachEntry(std::function<void(const Sqex::ThirdParty::TexTools::ModEntry&)> cb) const;
TraverseCallbackResult ForEachEntryInterruptible(std::function<TraverseCallbackResult(Sqex::ThirdParty::TexTools::ModEntry&)> cb);
TraverseCallbackResult ForEachEntryInterruptible(std::function<TraverseCallbackResult(const Sqex::ThirdParty::TexTools::ModEntry&)> cb) const;
};
void to_json(nlohmann::json&, const TTMPL&);
void from_json(const nlohmann::json&, TTMPL&);
class ItemMetadata {
public:
static constexpr uint32_t Version_Value = 2;
static const srell::u8cregex CharacterMetaPathTest;
static const srell::u8cregex HousingMetaPathTest;
enum class MetaDataType : uint32_t {
Invalid,
Imc,
Eqdp,
Eqp,
Est,
Gmp,
};
enum class TargetEstType {
Invalid,
Face,
Hair,
Head,
Body,
};
enum class TargetItemType {
Invalid,
Equipment,
Accessory,
Housing,
};
class NotItemMetadataError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
#pragma pack(push, 1)
struct MetaDataHeader {
LE<uint32_t> EntryCount;
LE<uint32_t> HeaderSize;
LE<uint32_t> FirstEntryLocatorOffset;
};
struct MetaDataEntryLocator {
LE<MetaDataType> Type;
LE<uint32_t> Offset;
LE<uint32_t> Size;
};
struct EqdpEntry {
uint32_t RaceCode;
uint8_t Value : 2;
uint8_t Padding : 6;
};
static_assert(sizeof EqdpEntry == 5);
struct GmpEntry {
uint32_t Enabled : 1;
uint32_t Animated : 1;
uint32_t RotationA : 10;
uint32_t RotationB : 10;
uint32_t RotationC : 10;
uint8_t UnknownLow : 4;
uint8_t UnknownHigh : 4;
};
static_assert(sizeof GmpEntry == 5);
struct EstEntry {
uint16_t RaceCode;
uint16_t SetId;
uint16_t SkelId;
};
#pragma pack(pop)
const std::vector<uint8_t> Data;
const uint32_t& Version;
const std::string TargetPath;
const std::string SourcePath;
const MetaDataHeader& Header;
const std::span<const MetaDataEntryLocator> AllEntries;
TargetItemType ItemType = TargetItemType::Invalid;
TargetEstType EstType = TargetEstType::Invalid;
std::string PrimaryType;
std::string SecondaryType;
std::string TargetImcPath;
std::string SourceImcPath;
uint16_t PrimaryId = 0;
uint16_t SecondaryId = 0;
size_t SlotIndex = 0;
size_t EqpEntrySize = 0;
size_t EqpEntryOffset = 0;
ItemMetadata(std::string gamePath, const RandomAccessStream& stream);
template<typename T>
std::span<const T> Get(MetaDataType type) const {
for (const auto& entry : AllEntries) {
if (entry.Type != type)
continue;
const auto spanBytes = std::span(Data).subspan(entry.Offset, entry.Size);
return { reinterpret_cast<const T*>(spanBytes.data()), spanBytes.size_bytes() / sizeof T };
}
return {};
}
static std::string EqdpPath(TargetItemType type, uint32_t race) {
switch (type) {
case TargetItemType::Equipment:
return std::format("chara/xls/charadb/equipmentdeformerparameter/c{:04}.eqdp", race);
case TargetItemType::Accessory:
return std::format("chara/xls/charadb/accessorydeformerparameter/c{:04}.eqdp", race);
default:
throw std::invalid_argument("only equipment and accessory have valid eqdp");
}
}
static constexpr auto EqpPath = "chara/xls/equipmentparameter/equipmentparameter.eqp";
static constexpr auto GmpPath = "chara/xls/equipmentparameter/gimmickparameter.gmp";
static const char* EstPath(TargetEstType type) {
switch (type) {
case TargetEstType::Face:
return "chara/xls/charadb/faceskeletontemplate.est";
case TargetEstType::Hair:
return "chara/xls/charadb/hairskeletontemplate.est";
case TargetEstType::Head:
return "chara/xls/charadb/extra_met.est";
case TargetEstType::Body:
return "chara/xls/charadb/extra_top.est";
default:
return nullptr;
}
}
void ApplyImcEdits(std::function<Sqex::Imc::File&()> reader) const;
void ApplyEqdpEdits(std::function<Sqex::Eqdp::ExpandedFile& (TargetItemType, uint32_t)> reader) const;
void ApplyEqpEdits(Sqex::EqpGmp::ExpandedFile& eqp) const;
void ApplyGmpEdits(Sqex::EqpGmp::ExpandedFile& gmp) const;
void ApplyEstEdits(Sqex::Est::File& est) const;
};
} | 2,544 |
5,169 | {
"name": "XTencentOpenAPI",
"version": "3.5.1",
"summary": "A mirror of TencentOpenAPI.",
"homepage": "https://wiki.connect.qq.com",
"license": {
"type": "MIT",
"file": "LICENSE"
},
"authors": {
"tramp": "<EMAIL>"
},
"source": {
"git": "https://github.com/imotoboy/XTencentOpenAPI.git",
"tag": "3.5.1"
},
"frameworks": [
"Security",
"SystemConfiguration",
"CoreGraphics",
"CoreTelephony"
],
"libraries": [
"iconv",
"sqlite3",
"c++",
"z"
],
"vendored_frameworks": "TencentOpenAPI/TencentOpenAPI.framework",
"platforms": {
"ios": "9.0"
},
"ios": {
"frameworks": [
"UIKit",
"Foundation",
"WebKit"
]
},
"requires_arc": true,
"pod_target_xcconfig": {
"EXCLUDED_ARCHS[sdk=iphonesimulator*]": "arm64"
},
"user_target_xcconfig": {
"EXCLUDED_ARCHS[sdk=iphonesimulator*]": "arm64"
}
}
| 436 |
1,061 | package org.sword.wechat4j;
import static org.junit.Assert.*;
import org.junit.Test;
import org.sword.wechat4j.message.CustomerMsg;
import com.alibaba.fastjson.JSONObject;
public class SendMsgTest {
CustomerMsg senMsg = new CustomerMsg("");
@Test
public void testSendText() {
// String expected = "Hello World";
// senMsg.sendText(expected);
//// String actual = senMsg.getMsgBody();
// JSONObject json = JSONObject.parseObject(actual);
// actual = json.getJSONObject("text").getString("content");
//
// assertEquals(expected, actual);
}
// @Test
// public void testSendImage() {
// fail("Not yet implemented");
// }
//
// @Test
// public void testSendVoice() {
// fail("Not yet implemented");
// }
//
// @Test
// public void testSendVideoStringStringStringString() {
// fail("Not yet implemented");
// }
//
// @Test
// public void testSendVideoVideoResponse() {
// fail("Not yet implemented");
// }
//
// @Test
// public void testSendMusicStringStringStringStringString() {
// fail("Not yet implemented");
// }
//
// @Test
// public void testSendMusicMusicResponse() {
// fail("Not yet implemented");
// }
@Test
public void testsendNew() {
// String expected = "Hello World";
// senMsg.sendNew("title", expected, "picUrl", "picUrl");
// String actual = senMsg.getMsgBody();
// JSONObject json = JSONObject.parseObject(actual);
// actual = json.getJSONObject("news").getJSONArray("articles").getJSONObject(0).getString("description");
//
// assertEquals(expected, actual);
}
}
| 517 |
1,905 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import graphviz
import graphviz.backend
from numpy.distutils.system_info import f2py_info
from sklearn import tree
from sklearn.datasets import load_boston, load_iris, load_wine, load_digits, load_breast_cancer, load_diabetes, fetch_mldata
from matplotlib.figure import figaspect
import string
import re
import matplotlib.pyplot as plt
import seaborn as sns
from dtreeviz.shadow import *
from numbers import Number
import matplotlib.patches as patches
from scipy import stats
from sklearn.neighbors import KernelDensity
import inspect
import sys
import tempfile
from dtreeviz.trees import *
def viz_iris(orientation="TD",
max_depth=3,
random_state=666,
fancy=True,
pickX=False,
label_fontsize=12,
ticks_fontsize=8,
fontname="Arial"):
clf = tree.DecisionTreeClassifier(
max_depth=max_depth, random_state=random_state)
iris = load_iris()
clf.fit(iris.data, iris.target)
if fontname == "TakaoPGothic":
feature_names = list(map(lambda x: f"特徴量{x}", iris.feature_names))
else:
feature_names = iris.feature_names
X = None
if pickX:
X = iris.data[np.random.randint(0, len(iris.data)), :]
viz = dtreeviz(clf,
iris.data,
iris.target,
target_name='variety',
feature_names=feature_names,
orientation=orientation,
class_names=["setosa",
"versicolor",
"virginica"], # 0,1,2 targets
fancy=fancy,
X=X,
label_fontsize=label_fontsize,
ticks_fontsize=ticks_fontsize,
fontname=fontname,
scale=(.5,.5))
return viz
viz_iris().save("/tmp/t.svg")
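# A couple of alternative invocations (illustrative only; output paths are arbitrary):
# viz_iris(orientation="LR", fancy=False).save("/tmp/t_lr.svg")
# viz_iris(fontname="TakaoPGothic", pickX=True).save("/tmp/t_jp.svg")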
# viz_iris().view() | 983 |
1,799 | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "lite/backends/x86/math/conv_direct.h"
#include <algorithm>
#include <cstring>
#include <iostream>
#include <vector>
#include "lite/backends/x86/math/avx/conv_utils.h"
#include "lite/core/context.h"
#ifdef __AVX__
#include <immintrin.h>
#else
#include <emmintrin.h>
#endif
namespace paddle {
namespace lite {
namespace x86 {
namespace math {
void conv_direct_3x3s2(const float* i_data,
const float* trans_weight,
int bs,
int ic,
int ih,
int iw,
int oc,
int oc_expand,
float* o_data,
int oh,
int ow,
int ph,
int pw,
const float* bias,
lite_api::ActivationType active_type,
operators::ActivationParam act_param) {
constexpr int ww = 3;
constexpr int wh = 3;
constexpr int strideh = 2;
constexpr int stridew = 2;
#ifdef __AVX__
constexpr int BLOCK = 8;
// the sliding window is 5x7 and can obtain 2x3 results! for AVX
constexpr int window_h = 5;
constexpr int window_w = 7;
#else
constexpr int BLOCK = 4;
constexpr int window_h = 5;
constexpr int window_w = 7;
#endif
  // new_ih / new_iw: the maximum value of the upper left corner of the
  // sliding window in the h / w dimension;
  // new_ih_start / new_iw_start: the corresponding starting (minimum) values
int new_ih;
int new_iw;
int new_ih_start;
int new_iw_start;
if (ph == 0 && pw == 0) {
// 4 is the stride_h of sliding window
// 6 is the stride_w of sliding window
new_ih = (ih - window_h) / 4 * 4;
new_iw = (iw - window_w) / 6 * 6;
new_ih_start = 0;
new_iw_start = 0;
} else if (ph == 1 && pw == 1) {
new_iw = (iw - window_w - 1) / 6 * 6 + 1;
new_ih = (ih - window_h - 1) / 4 * 4 + 1;
new_ih_start = 1;
new_iw_start = 1;
} else {
LOG(FATAL) << "[X86] conv_direct only support 3x3s2 with padding = 0 or 1";
}
// [0,o_left) in output map needs Special treatment (Left boundary)
// [o_right, ow) in output map needs Special treatment (Right boundary)
// [0,o_upper) same as above (Upper boundary)
// [o_down, oh) same as above (Lower boundary)
int o_left = (new_iw_start + pw) / 2;
int o_right = (new_iw + pw) / 2 + 3;
int o_upper = (new_ih_start + ph) / 2;
int o_down = (new_ih + ph) / 2 + 2;
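  // Worked example of the index arithmetic above (illustrative numbers only):
  // with ih = iw = 10 and ph = pw = 1,
  //   new_ih = (10 - 5 - 1) / 4 * 4 + 1 = 5, new_iw = (10 - 7 - 1) / 6 * 6 + 1 = 1,
  //   o_left = (1 + 1) / 2 = 1, o_right = (1 + 1) / 2 + 3 = 4,
  //   o_upper = (1 + 1) / 2 = 1, o_down = (5 + 1) / 2 + 2 = 5,
  // so output rows [0, 1) and [5, oh) and columns [0, 1) and [4, ow) are handled
  // by the boundary code below, and the fast path covers the interior only.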
// The number of channels of convolution kernel
// and the number of input channels are always the same !
int wc = ic;
int ichw = ic * ih * iw;
int ihw = ih * iw;
int wchw = wc * wh * ww;
int whwB = wh * ww * BLOCK;
int ohw = oh * ow;
int ochw = oc * oh * ow;
int owB = ow * BLOCK;
int trans_out_size = oc_expand * ohw;
// holds the intermediate HWC output result
float* trans_out = static_cast<float*>(
TargetMalloc(TARGET(kX86), sizeof(float) * trans_out_size));
// fetch bs_i th input feature map
for (int bs_i = 0; bs_i < bs; bs_i++) {
memset(trans_out, 0, sizeof(float) * trans_out_size);
// Handle upper boundary!
// We dealt with the boundary from the beginning
for (int oh_i = 0; oh_i < o_upper; oh_i++) {
for (int ow_i = 0; ow_i < ow; ow_i++) {
// oh_i and ow_i is the index of the output.
// Next, calculate the index of their corresponding input.
// These two are in the upper left corner of the corresponding
// input!
int ih_i = oh_i * strideh - ph;
int iw_i = ow_i * stridew - pw;
// fetch the ic_i th channel in this input feature map
for (int ic_i = 0; ic_i < wc; ic_i++) {
const float* input_start_address = i_data + bs_i * ichw + ic_i * ihw;
// fetch oc_gi th group kernel,there are BLOCK kernels
// in it. we only need to deal with its ic_i channel !
// oc_gi is oc_group_i !
for (int oc_gi = 0; oc_gi < oc_expand; oc_gi += BLOCK) {
          // Now, we need to compute the conv of one planar feature map and BLOCK
// planar kernel
// the planar feature map's starting address
const float* kernel_start_address =
trans_weight + oc_gi * wchw +
ic_i * whwB; // the first kernel's address in this BLOCK
float* output_address =
trans_out + oc_gi * ohw + oh_i * ow * BLOCK + ow_i * BLOCK;
// Let's start the convolution of 3x3!
#ifdef __AVX__
__m256 res = _mm256_loadu_ps(output_address);
#else
__m128 res = _mm_loadu_ps(output_address);
#endif
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++) {
int new_ih_i = ih_i + i;
int new_iw_i = iw_i + j;
if (new_ih_i < 0 || new_ih_i >= ih || new_iw_i < 0 ||
new_iw_i >= iw)
continue;
const float* input_address =
input_start_address + new_ih_i * iw + new_iw_i;
#ifdef __AVX__
__m256 input = _mm256_set1_ps(*input_address);
__m256 w =
_mm256_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm256_fmadd_ps(input, w, res);
#else
__m128 input = _mm_set1_ps(*input_address);
__m128 w =
_mm_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm_fmadd_ps(input, w, res);
#endif
}
#ifdef __AVX__
_mm256_storeu_ps(output_address, res);
#else
_mm_storeu_ps(output_address, res);
#endif
}
}
}
}
// Handle lower boundary!
for (int oh_i = o_down; oh_i < oh; oh_i++) {
for (int ow_i = 0; ow_i < ow; ow_i++) {
int ih_i = oh_i * strideh - ph;
int iw_i = ow_i * stridew - pw;
// fetch the ic_i th channel in this input feature map
for (int ic_i = 0; ic_i < wc; ic_i++) {
const float* input_start_address = i_data + bs_i * ichw + ic_i * ihw;
// fetch oc_gi th group kernel,there are BLOCK kernels
// in it. we only need to deal with its ic_i channel !
// oc_gi is oc_group_i !
for (int oc_gi = 0; oc_gi < oc_expand; oc_gi += BLOCK) {
          // Now, we need to compute the conv of one planar feature map and BLOCK
// planar kernel
// the planar feature map's starting address
const float* kernel_start_address =
trans_weight + oc_gi * wchw +
ic_i * whwB; // the first kernel's address in this BLOCK
float* output_address =
trans_out + oc_gi * ohw + oh_i * ow * BLOCK + ow_i * BLOCK;
#ifdef __AVX__
__m256 res = _mm256_loadu_ps(output_address);
#else
__m128 res = _mm_loadu_ps(output_address);
#endif
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++) {
int new_ih_i = ih_i + i;
int new_iw_i = iw_i + j;
if (new_ih_i < 0 || new_ih_i >= ih || new_iw_i < 0 ||
new_iw_i >= iw)
continue;
const float* input_address =
input_start_address + new_ih_i * iw + new_iw_i;
#ifdef __AVX__
__m256 input = _mm256_set1_ps(*input_address);
__m256 w =
_mm256_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm256_fmadd_ps(input, w, res);
#else
__m128 input = _mm_set1_ps(*input_address);
__m128 w =
_mm_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm_fmadd_ps(input, w, res);
#endif
}
#ifdef __AVX__
_mm256_storeu_ps(output_address, res);
#else
_mm_storeu_ps(output_address, res);
#endif
}
}
}
}
// Handle left boundary!
for (int oh_i = 0; oh_i < oh; oh_i++) {
if ((oh_i >= 0 && oh_i < o_upper) || (oh_i >= o_down && oh_i < oh))
continue;
for (int ow_i = 0; ow_i < o_left; ow_i++) {
int ih_i = oh_i * strideh - ph;
int iw_i = ow_i * stridew - pw;
// fetch the ic_i th channel in this input feature map
for (int ic_i = 0; ic_i < wc; ic_i++) {
const float* input_start_address = i_data + bs_i * ichw + ic_i * ihw;
// fetch oc_gi th group kernel,there are BLOCK kernels
// in it. we only need to deal with its ic_i channel !
// oc_gi is oc_group_i !
for (int oc_gi = 0; oc_gi < oc_expand; oc_gi += BLOCK) {
          // Now, we need to compute the conv of one planar feature map and BLOCK
// planar kernel
// the planar feature map's starting address
const float* kernel_start_address =
trans_weight + oc_gi * wchw +
ic_i * whwB; // the first kernel's address in this BLOCK
float* output_address =
trans_out + oc_gi * ohw + oh_i * ow * BLOCK + ow_i * BLOCK;
#ifdef __AVX__
__m256 res = _mm256_loadu_ps(output_address);
#else
__m128 res = _mm_loadu_ps(output_address);
#endif
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++) {
int new_ih_i = ih_i + i;
int new_iw_i = iw_i + j;
if (new_ih_i < 0 || new_ih_i >= ih || new_iw_i < 0 ||
new_iw_i >= iw)
continue;
const float* input_address =
input_start_address + new_ih_i * iw + new_iw_i;
#ifdef __AVX__
__m256 input = _mm256_set1_ps(*input_address);
__m256 w =
_mm256_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm256_fmadd_ps(input, w, res);
#else
__m128 input = _mm_set1_ps(*input_address);
__m128 w =
_mm_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm_fmadd_ps(input, w, res);
#endif
}
#ifdef __AVX__
_mm256_storeu_ps(output_address, res);
#else
_mm_storeu_ps(output_address, res);
#endif
}
}
}
}
// Handle right boundary!
for (int oh_i = 0; oh_i < oh; oh_i++) {
if ((oh_i >= 0 && oh_i < o_upper) || (oh_i >= o_down && oh_i < oh))
continue;
for (int ow_i = o_right; ow_i < ow; ow_i++) {
int ih_i = oh_i * strideh - ph;
int iw_i = ow_i * stridew - pw;
// fetch the ic_i th channel in this input feature map
for (int ic_i = 0; ic_i < wc; ic_i++) {
const float* input_start_address = i_data + bs_i * ichw + ic_i * ihw;
// fetch oc_gi th group kernel,there are BLOCK kernels
// in it. we only need to deal with its ic_i channel !
// oc_gi is oc_group_i !
for (int oc_gi = 0; oc_gi < oc_expand; oc_gi += BLOCK) {
          // Now, we need to compute the conv of one planar feature map and BLOCK
// planar kernel
// the planar feature map's starting address
const float* kernel_start_address =
trans_weight + oc_gi * wchw +
ic_i * whwB; // the first kernel's address in this BLOCK
float* output_address =
trans_out + oc_gi * ohw + oh_i * ow * BLOCK + ow_i * BLOCK;
#ifdef __AVX__
__m256 res = _mm256_loadu_ps(output_address);
#else
__m128 res = _mm_loadu_ps(output_address);
#endif
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++) {
int new_ih_i = ih_i + i;
int new_iw_i = iw_i + j;
if (new_ih_i < 0 || new_ih_i >= ih || new_iw_i < 0 ||
new_iw_i >= iw)
continue;
const float* input_address =
input_start_address + new_ih_i * iw + new_iw_i;
#ifdef __AVX__
__m256 input = _mm256_set1_ps(*input_address);
__m256 w =
_mm256_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm256_fmadd_ps(input, w, res);
#else
__m128 input = _mm_set1_ps(*input_address);
__m128 w =
_mm_loadu_ps(kernel_start_address + (i * 3 + j) * BLOCK);
res = _mm_fmadd_ps(input, w, res);
#endif
}
#ifdef __AVX__
_mm256_storeu_ps(output_address, res);
#else
_mm_storeu_ps(output_address, res);
#endif
}
}
}
}
// fetch the ic_i th channel in this input feature map
for (int ic_i = 0; ic_i < wc; ic_i++) {
const float* input_start_address = i_data + bs_i * ichw + ic_i * ihw;
// fetch oc_gi th group kernel,there are BLOCK kernels
// in it. we only need to deal with its ic_i channel !
// oc_gi is oc_group_i !
for (int oc_gi = 0; oc_gi < oc_expand; oc_gi += BLOCK) {
        // Now, we need to compute the conv of one planar feature map and BLOCK
// planar kernel
// the planar feature map's starting address
const float* kernel_start_address =
trans_weight + oc_gi * wchw +
ic_i * whwB; // the first kernel's address in this BLOCK
float* output_start_address = trans_out + oc_gi * ohw;
/* So far, we have dealt with the special boundary, and now we begin to deal with
 * the general situation */
// prefetch the 3x3 conv kernel outside the below two Nested loop !
#ifdef __AVX__
// Take out 9 weight values to the register
__m256 w00 = _mm256_loadu_ps(kernel_start_address + 0 * BLOCK);
__m256 w01 = _mm256_loadu_ps(kernel_start_address + 1 * BLOCK);
__m256 w02 = _mm256_loadu_ps(kernel_start_address + 2 * BLOCK);
__m256 w10 = _mm256_loadu_ps(kernel_start_address + 3 * BLOCK);
__m256 w11 = _mm256_loadu_ps(kernel_start_address + 4 * BLOCK);
__m256 w12 = _mm256_loadu_ps(kernel_start_address + 5 * BLOCK);
__m256 w20 = _mm256_loadu_ps(kernel_start_address + 6 * BLOCK);
__m256 w21 = _mm256_loadu_ps(kernel_start_address + 7 * BLOCK);
__m256 w22 = _mm256_loadu_ps(kernel_start_address + 8 * BLOCK);
#else
// Take out 9 weight values to the register
__m128 w00 = _mm_loadu_ps(kernel_start_address + 0 * BLOCK);
__m128 w01 = _mm_loadu_ps(kernel_start_address + 1 * BLOCK);
__m128 w02 = _mm_loadu_ps(kernel_start_address + 2 * BLOCK);
__m128 w10 = _mm_loadu_ps(kernel_start_address + 3 * BLOCK);
__m128 w11 = _mm_loadu_ps(kernel_start_address + 4 * BLOCK);
__m128 w12 = _mm_loadu_ps(kernel_start_address + 5 * BLOCK);
__m128 w20 = _mm_loadu_ps(kernel_start_address + 6 * BLOCK);
__m128 w21 = _mm_loadu_ps(kernel_start_address + 7 * BLOCK);
__m128 w22 = _mm_loadu_ps(kernel_start_address + 8 * BLOCK);
#endif
        // one sliding window can generate 2x3 results
        // below are the first addresses of the two output rows produced by the first window!
float* output_address0 = output_start_address +
(new_ih_start + ph) / 2 * ow * BLOCK +
(new_iw_start + pw) / 2 * BLOCK;
float* output_address1 = output_address0 + ow * BLOCK;
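        // Illustrative mapping (derived from the index math above): one 5x7 window
        // whose upper-left input coordinate is (ih_i, iw_i) produces the 2x3 output
        // tile covering rows (ih_i + ph) / 2 and (ih_i + ph) / 2 + 1 and columns
        // (iw_i + pw) / 2 .. (iw_i + pw) / 2 + 2; output_address0 / output_address1
        // point at the first BLOCK-wide element of those two rows in trans_out.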
for (int ih_i = new_ih_start; ih_i <= new_ih; ih_i += 4,
output_address0 += 2 * ow * BLOCK,
output_address1 += 2 * ow * BLOCK) {
          // row0 .. row4 are the first addresses of input rows ih_i .. ih_i + 4
const float* row0 = input_start_address + ih_i * iw;
const float* row1 = row0 + 1 * iw;
const float* row2 = row0 + 2 * iw;
const float* row3 = row0 + 3 * iw;
const float* row4 = row0 + 4 * iw;
// The following is the starting address of
// each line of the sliding window
const float* iv0 = row0 + new_iw_start;
const float* iv1 = row1 + new_iw_start;
const float* iv2 = row2 + new_iw_start;
const float* iv3 = row3 + new_iw_start;
const float* iv4 = row4 + new_iw_start;
// the first line output's address
float* output_address00 = output_address0 + BLOCK * 0;
float* output_address01 = output_address0 + BLOCK * 1;
float* output_address02 = output_address0 + BLOCK * 2;
// the second line output's address
float* output_address10 = output_address1 + BLOCK * 0;
float* output_address11 = output_address1 + BLOCK * 1;
float* output_address12 = output_address1 + BLOCK * 2;
for (int iw_i = new_iw_start; iw_i <= new_iw; iw_i += 6,
iv0 += 6,
iv1 += 6,
iv2 += 6,
iv3 += 6,
iv4 += 6,
output_address00 += 3 * BLOCK,
output_address01 += 3 * BLOCK,
output_address02 += 3 * BLOCK,
output_address10 += 3 * BLOCK,
output_address11 += 3 * BLOCK,
output_address12 += 3 * BLOCK) {
#ifdef __AVX__
// Sliding windows can produce 2x3 results, I now create them
__m256 res00 = _mm256_loadu_ps(output_address00);
__m256 res01 = _mm256_loadu_ps(output_address01);
__m256 res02 = _mm256_loadu_ps(output_address02);
__m256 res10 = _mm256_loadu_ps(output_address10);
__m256 res11 = _mm256_loadu_ps(output_address11);
__m256 res12 = _mm256_loadu_ps(output_address12);
            // I have used 15 registers, and there is one left!
            // but I will use six registers to hold input data to generate outputs !
            // iv0: 0 1 2 3 4 5 6
            // 0,1,2 is Responsible for res00
            // 2,3,4 is Responsible for res01
            // 4,5,6 is Responsible for res02
            // iv4: 0 1 2 3 4 5 6
            // 0,1,2 is Responsible for res10
            // 2,3,4 is Responsible for res11
            // 4,5,6 is Responsible for res12
__m256 input00 = _mm256_set1_ps(iv0[0]);
__m256 input02 = _mm256_set1_ps(iv0[2]);
__m256 input04 = _mm256_set1_ps(iv0[4]);
__m256 input10 = _mm256_set1_ps(iv4[0]);
__m256 input12 = _mm256_set1_ps(iv4[2]);
__m256 input14 = _mm256_set1_ps(iv4[4]);
res00 = _mm256_fmadd_ps(input00, w00, res00);
res01 = _mm256_fmadd_ps(input02, w00, res01);
res02 = _mm256_fmadd_ps(input04, w00, res02);
res10 = _mm256_fmadd_ps(input10, w20, res10);
res11 = _mm256_fmadd_ps(input12, w20, res11);
res12 = _mm256_fmadd_ps(input14, w20, res12);
input00 = _mm256_set1_ps(iv0[6]);
input10 = _mm256_set1_ps(iv4[6]);
res00 = _mm256_fmadd_ps(input02, w02, res00);
res01 = _mm256_fmadd_ps(input04, w02, res01);
res02 = _mm256_fmadd_ps(input00, w02, res02);
res10 = _mm256_fmadd_ps(input12, w22, res10);
res11 = _mm256_fmadd_ps(input14, w22, res11);
res12 = _mm256_fmadd_ps(input10, w22, res12);
input00 = _mm256_set1_ps(iv0[1]);
input02 = _mm256_set1_ps(iv0[3]);
input04 = _mm256_set1_ps(iv0[5]);
input10 = _mm256_set1_ps(iv4[1]);
input12 = _mm256_set1_ps(iv4[3]);
input14 = _mm256_set1_ps(iv4[5]);
res00 = _mm256_fmadd_ps(input00, w01, res00);
res01 = _mm256_fmadd_ps(input02, w01, res01);
res02 = _mm256_fmadd_ps(input04, w01, res02);
res10 = _mm256_fmadd_ps(input10, w21, res10);
res11 = _mm256_fmadd_ps(input12, w21, res11);
res12 = _mm256_fmadd_ps(input14, w21, res12);
// iv1: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res00
// 2,3,4 is Responsible for res01
// 4,5,6 is Responsible for res02
// iv3: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res10
// 2,3,4 is Responsible for res11
// 4,5,6 is Responsible for res12
input00 = _mm256_set1_ps(iv1[0]);
input02 = _mm256_set1_ps(iv1[2]);
input04 = _mm256_set1_ps(iv1[4]);
input10 = _mm256_set1_ps(iv3[0]);
input12 = _mm256_set1_ps(iv3[2]);
input14 = _mm256_set1_ps(iv3[4]);
res00 = _mm256_fmadd_ps(input00, w10, res00);
res01 = _mm256_fmadd_ps(input02, w10, res01);
res02 = _mm256_fmadd_ps(input04, w10, res02);
res10 = _mm256_fmadd_ps(input10, w10, res10);
res11 = _mm256_fmadd_ps(input12, w10, res11);
res12 = _mm256_fmadd_ps(input14, w10, res12);
input00 = _mm256_set1_ps(iv1[6]);
input10 = _mm256_set1_ps(iv3[6]);
res00 = _mm256_fmadd_ps(input02, w12, res00);
res01 = _mm256_fmadd_ps(input04, w12, res01);
res02 = _mm256_fmadd_ps(input00, w12, res02);
res10 = _mm256_fmadd_ps(input12, w12, res10);
res11 = _mm256_fmadd_ps(input14, w12, res11);
res12 = _mm256_fmadd_ps(input10, w12, res12);
input00 = _mm256_set1_ps(iv1[1]);
input02 = _mm256_set1_ps(iv1[3]);
input04 = _mm256_set1_ps(iv1[5]);
input10 = _mm256_set1_ps(iv3[1]);
input12 = _mm256_set1_ps(iv3[3]);
input14 = _mm256_set1_ps(iv3[5]);
res00 = _mm256_fmadd_ps(input00, w11, res00);
res01 = _mm256_fmadd_ps(input02, w11, res01);
res02 = _mm256_fmadd_ps(input04, w11, res02);
res10 = _mm256_fmadd_ps(input10, w11, res10);
res11 = _mm256_fmadd_ps(input12, w11, res11);
res12 = _mm256_fmadd_ps(input14, w11, res12);
// iv2: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res00
// 2,3,4 is Responsible for res01
// 4,5,6 is Responsible for res02
// iv2: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res10
// 2,3,4 is Responsible for res11
// 4,5,6 is Responsible for res12
input00 = _mm256_set1_ps(iv2[0]);
input02 = _mm256_set1_ps(iv2[2]);
input04 = _mm256_set1_ps(iv2[4]);
res00 = _mm256_fmadd_ps(input00, w20, res00);
res01 = _mm256_fmadd_ps(input02, w20, res01);
res02 = _mm256_fmadd_ps(input04, w20, res02);
res10 = _mm256_fmadd_ps(input00, w00, res10);
res11 = _mm256_fmadd_ps(input02, w00, res11);
res12 = _mm256_fmadd_ps(input04, w00, res12);
input00 = _mm256_set1_ps(iv2[6]);
res00 = _mm256_fmadd_ps(input02, w22, res00);
res01 = _mm256_fmadd_ps(input04, w22, res01);
res02 = _mm256_fmadd_ps(input00, w22, res02);
res10 = _mm256_fmadd_ps(input02, w02, res10);
res11 = _mm256_fmadd_ps(input04, w02, res11);
res12 = _mm256_fmadd_ps(input00, w02, res12);
input00 = _mm256_set1_ps(iv2[1]);
input02 = _mm256_set1_ps(iv2[3]);
input04 = _mm256_set1_ps(iv2[5]);
res00 = _mm256_fmadd_ps(input00, w21, res00);
res01 = _mm256_fmadd_ps(input02, w21, res01);
res02 = _mm256_fmadd_ps(input04, w21, res02);
res10 = _mm256_fmadd_ps(input00, w01, res10);
res11 = _mm256_fmadd_ps(input02, w01, res11);
res12 = _mm256_fmadd_ps(input04, w01, res12);
// Store them back
_mm256_storeu_ps(output_address00, res00);
_mm256_storeu_ps(output_address01, res01);
_mm256_storeu_ps(output_address02, res02);
_mm256_storeu_ps(output_address10, res10);
_mm256_storeu_ps(output_address11, res11);
_mm256_storeu_ps(output_address12, res12);
#else
// Sliding windows can produce 2x3 results, I now create them
__m128 res00 = _mm_loadu_ps(output_address00);
__m128 res01 = _mm_loadu_ps(output_address01);
__m128 res02 = _mm_loadu_ps(output_address02);
__m128 res10 = _mm_loadu_ps(output_address10);
__m128 res11 = _mm_loadu_ps(output_address11);
__m128 res12 = _mm_loadu_ps(output_address12);
            // I have used 15 registers, and there is one left!
            // but I will use six registers to hold input data to generate outputs !
            // iv0: 0 1 2 3 4 5 6
            // 0,1,2 is Responsible for res00
            // 2,3,4 is Responsible for res01
            // 4,5,6 is Responsible for res02
            // iv4: 0 1 2 3 4 5 6
            // 0,1,2 is Responsible for res10
            // 2,3,4 is Responsible for res11
            // 4,5,6 is Responsible for res12
__m128 input00 = _mm_set1_ps(iv0[0]);
__m128 input02 = _mm_set1_ps(iv0[2]);
__m128 input04 = _mm_set1_ps(iv0[4]);
__m128 input10 = _mm_set1_ps(iv4[0]);
__m128 input12 = _mm_set1_ps(iv4[2]);
__m128 input14 = _mm_set1_ps(iv4[4]);
res00 = _mm_fmadd_ps(input00, w00, res00);
res01 = _mm_fmadd_ps(input02, w00, res01);
res02 = _mm_fmadd_ps(input04, w00, res02);
res10 = _mm_fmadd_ps(input10, w20, res10);
res11 = _mm_fmadd_ps(input12, w20, res11);
res12 = _mm_fmadd_ps(input14, w20, res12);
input00 = _mm_set1_ps(iv0[6]);
input10 = _mm_set1_ps(iv4[6]);
res00 = _mm_fmadd_ps(input02, w02, res00);
res01 = _mm_fmadd_ps(input04, w02, res01);
res02 = _mm_fmadd_ps(input00, w02, res02);
res10 = _mm_fmadd_ps(input12, w22, res10);
res11 = _mm_fmadd_ps(input14, w22, res11);
res12 = _mm_fmadd_ps(input10, w22, res12);
input00 = _mm_set1_ps(iv0[1]);
input02 = _mm_set1_ps(iv0[3]);
input04 = _mm_set1_ps(iv0[5]);
input10 = _mm_set1_ps(iv4[1]);
input12 = _mm_set1_ps(iv4[3]);
input14 = _mm_set1_ps(iv4[5]);
res00 = _mm_fmadd_ps(input00, w01, res00);
res01 = _mm_fmadd_ps(input02, w01, res01);
res02 = _mm_fmadd_ps(input04, w01, res02);
res10 = _mm_fmadd_ps(input10, w21, res10);
res11 = _mm_fmadd_ps(input12, w21, res11);
res12 = _mm_fmadd_ps(input14, w21, res12);
// iv1: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res00
// 2,3,4 is Responsible for res01
// 4,5,6 is Responsible for res02
// iv3: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res10
// 2,3,4 is Responsible for res11
// 4,5,6 is Responsible for res12
input00 = _mm_set1_ps(iv1[0]);
input02 = _mm_set1_ps(iv1[2]);
input04 = _mm_set1_ps(iv1[4]);
input10 = _mm_set1_ps(iv3[0]);
input12 = _mm_set1_ps(iv3[2]);
input14 = _mm_set1_ps(iv3[4]);
res00 = _mm_fmadd_ps(input00, w10, res00);
res01 = _mm_fmadd_ps(input02, w10, res01);
res02 = _mm_fmadd_ps(input04, w10, res02);
res10 = _mm_fmadd_ps(input10, w10, res10);
res11 = _mm_fmadd_ps(input12, w10, res11);
res12 = _mm_fmadd_ps(input14, w10, res12);
input00 = _mm_set1_ps(iv1[6]);
input10 = _mm_set1_ps(iv3[6]);
res00 = _mm_fmadd_ps(input02, w12, res00);
res01 = _mm_fmadd_ps(input04, w12, res01);
res02 = _mm_fmadd_ps(input00, w12, res02);
res10 = _mm_fmadd_ps(input12, w12, res10);
res11 = _mm_fmadd_ps(input14, w12, res11);
res12 = _mm_fmadd_ps(input10, w12, res12);
input00 = _mm_set1_ps(iv1[1]);
input02 = _mm_set1_ps(iv1[3]);
input04 = _mm_set1_ps(iv1[5]);
input10 = _mm_set1_ps(iv3[1]);
input12 = _mm_set1_ps(iv3[3]);
input14 = _mm_set1_ps(iv3[5]);
res00 = _mm_fmadd_ps(input00, w11, res00);
res01 = _mm_fmadd_ps(input02, w11, res01);
res02 = _mm_fmadd_ps(input04, w11, res02);
res10 = _mm_fmadd_ps(input10, w11, res10);
res11 = _mm_fmadd_ps(input12, w11, res11);
res12 = _mm_fmadd_ps(input14, w11, res12);
// iv2: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res00
// 2,3,4 is Responsible for res01
// 4,5,6 is Responsible for res02
// iv2: 0 1 2 3 4 5 6
// 0,1,2 is Responsible for res10
// 2,3,4 is Responsible for res11
// 4,5,6 is Responsible for res12
input00 = _mm_set1_ps(iv2[0]);
input02 = _mm_set1_ps(iv2[2]);
input04 = _mm_set1_ps(iv2[4]);
res00 = _mm_fmadd_ps(input00, w20, res00);
res01 = _mm_fmadd_ps(input02, w20, res01);
res02 = _mm_fmadd_ps(input04, w20, res02);
res10 = _mm_fmadd_ps(input00, w00, res10);
res11 = _mm_fmadd_ps(input02, w00, res11);
res12 = _mm_fmadd_ps(input04, w00, res12);
input00 = _mm_set1_ps(iv2[6]);
res00 = _mm_fmadd_ps(input02, w22, res00);
res01 = _mm_fmadd_ps(input04, w22, res01);
res02 = _mm_fmadd_ps(input00, w22, res02);
res10 = _mm_fmadd_ps(input02, w02, res10);
res11 = _mm_fmadd_ps(input04, w02, res11);
res12 = _mm_fmadd_ps(input00, w02, res12);
input00 = _mm_set1_ps(iv2[1]);
input02 = _mm_set1_ps(iv2[3]);
input04 = _mm_set1_ps(iv2[5]);
res00 = _mm_fmadd_ps(input00, w21, res00);
res01 = _mm_fmadd_ps(input02, w21, res01);
res02 = _mm_fmadd_ps(input04, w21, res02);
res10 = _mm_fmadd_ps(input00, w01, res10);
res11 = _mm_fmadd_ps(input02, w01, res11);
res12 = _mm_fmadd_ps(input04, w01, res12);
// Store them back
_mm_storeu_ps(output_address00, res00);
_mm_storeu_ps(output_address01, res01);
_mm_storeu_ps(output_address02, res02);
_mm_storeu_ps(output_address10, res10);
_mm_storeu_ps(output_address11, res11);
_mm_storeu_ps(output_address12, res12);
#endif
}
}
}
}
// we always assume oc % BLOCK == 0!
// convert trans_out(HWC) to o_data(CHW)!
for (int oc_gi = 0; oc_gi < oc; oc_gi += BLOCK) {
for (int oh_i = 0; oh_i < oh; oh_i++) {
for (int ow_i = 0; ow_i < ow / BLOCK * BLOCK; ow_i += BLOCK) {
// trans_out's start_index, we need fetch 8x8 element;
float* from_address =
trans_out + oc_gi * ohw + oh_i * owB + ow_i * BLOCK;
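          // Note: the BLOCK consecutive BLOCK-wide vectors loaded below hold BLOCK
          // output pixels in HWC order (channels contiguous per pixel); transposing
          // them yields BLOCK channel rows of BLOCK pixels each, which can then be
          // stored directly into the CHW output planes.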
#ifdef __AVX__
__m256 row0 = _mm256_loadu_ps(from_address + 0 * BLOCK);
__m256 row1 = _mm256_loadu_ps(from_address + 1 * BLOCK);
__m256 row2 = _mm256_loadu_ps(from_address + 2 * BLOCK);
__m256 row3 = _mm256_loadu_ps(from_address + 3 * BLOCK);
__m256 row4 = _mm256_loadu_ps(from_address + 4 * BLOCK);
__m256 row5 = _mm256_loadu_ps(from_address + 5 * BLOCK);
__m256 row6 = _mm256_loadu_ps(from_address + 6 * BLOCK);
__m256 row7 = _mm256_loadu_ps(from_address + 7 * BLOCK);
transpose8_ps(row0, row1, row2, row3, row4, row5, row6, row7);
#else
__m128 row0 = _mm_loadu_ps(from_address + 0 * BLOCK);
__m128 row1 = _mm_loadu_ps(from_address + 1 * BLOCK);
__m128 row2 = _mm_loadu_ps(from_address + 2 * BLOCK);
__m128 row3 = _mm_loadu_ps(from_address + 3 * BLOCK);
_MM_TRANSPOSE4_PS(row0, row1, row2, row3);
#endif
if (bias != nullptr) {
#ifdef __AVX__
row0 = _mm256_add_ps(row0, _mm256_set1_ps(bias[oc_gi + 0]));
row1 = _mm256_add_ps(row1, _mm256_set1_ps(bias[oc_gi + 1]));
row2 = _mm256_add_ps(row2, _mm256_set1_ps(bias[oc_gi + 2]));
row3 = _mm256_add_ps(row3, _mm256_set1_ps(bias[oc_gi + 3]));
row4 = _mm256_add_ps(row4, _mm256_set1_ps(bias[oc_gi + 4]));
row5 = _mm256_add_ps(row5, _mm256_set1_ps(bias[oc_gi + 5]));
row6 = _mm256_add_ps(row6, _mm256_set1_ps(bias[oc_gi + 6]));
row7 = _mm256_add_ps(row7, _mm256_set1_ps(bias[oc_gi + 7]));
#else
row0 = _mm_add_ps(row0, _mm_set1_ps(bias[oc_gi + 0]));
row1 = _mm_add_ps(row1, _mm_set1_ps(bias[oc_gi + 1]));
row2 = _mm_add_ps(row2, _mm_set1_ps(bias[oc_gi + 2]));
row3 = _mm_add_ps(row3, _mm_set1_ps(bias[oc_gi + 3]));
#endif
}
if (active_type == lite_api::ActivationType::kRelu) {
#ifdef __AVX__
__m256 vzero = _mm256_set1_ps(0.f);
row0 = _mm256_max_ps(row0, vzero);
row1 = _mm256_max_ps(row1, vzero);
row2 = _mm256_max_ps(row2, vzero);
row3 = _mm256_max_ps(row3, vzero);
row4 = _mm256_max_ps(row4, vzero);
row5 = _mm256_max_ps(row5, vzero);
row6 = _mm256_max_ps(row6, vzero);
row7 = _mm256_max_ps(row7, vzero);
#else
row0 = _mm_max_ps(row0, _mm_set1_ps(0.f));
row1 = _mm_max_ps(row1, _mm_set1_ps(0.f));
row2 = _mm_max_ps(row2, _mm_set1_ps(0.f));
row3 = _mm_max_ps(row3, _mm_set1_ps(0.f));
#endif
} else if (active_type == lite_api::ActivationType::kRelu6) {
#ifdef __AVX__
__m256 vzero = _mm256_set1_ps(0.f);
__m256 vsix = _mm256_set1_ps(act_param.Relu_clipped_coef);
row0 = _mm256_max_ps(row0, vzero);
row1 = _mm256_max_ps(row1, vzero);
row2 = _mm256_max_ps(row2, vzero);
row3 = _mm256_max_ps(row3, vzero);
row4 = _mm256_max_ps(row4, vzero);
row5 = _mm256_max_ps(row5, vzero);
row6 = _mm256_max_ps(row6, vzero);
row7 = _mm256_max_ps(row7, vzero);
row0 = _mm256_min_ps(row0, vsix);
row1 = _mm256_min_ps(row1, vsix);
row2 = _mm256_min_ps(row2, vsix);
row3 = _mm256_min_ps(row3, vsix);
row4 = _mm256_min_ps(row4, vsix);
row5 = _mm256_min_ps(row5, vsix);
row6 = _mm256_min_ps(row6, vsix);
row7 = _mm256_min_ps(row7, vsix);
#else
__m128 vzero = _mm_set1_ps(0.f);
__m128 vsix = _mm_set1_ps(act_param.Relu_clipped_coef);
row0 = _mm_max_ps(row0, vzero);
row1 = _mm_max_ps(row1, vzero);
row2 = _mm_max_ps(row2, vzero);
row3 = _mm_max_ps(row3, vzero);
row0 = _mm_min_ps(row0, vsix);
row1 = _mm_min_ps(row1, vsix);
row2 = _mm_min_ps(row2, vsix);
row3 = _mm_min_ps(row3, vsix);
#endif
} else if (active_type == lite_api::ActivationType::kLeakyRelu) {
#ifdef __AVX__
__m256 vzero = _mm256_set1_ps(0.f);
__m256 vscale = _mm256_set1_ps(act_param.Leaky_relu_alpha);
row0 = _mm256_blendv_ps(_mm256_mul_ps(row0, vscale),
row0,
_mm256_cmp_ps(row0, vzero, _CMP_GT_OS));
          row1 = _mm256_blendv_ps(_mm256_mul_ps(row1, vscale),
                                  row1,
                                  _mm256_cmp_ps(row1, vzero, _CMP_GT_OS));
          row2 = _mm256_blendv_ps(_mm256_mul_ps(row2, vscale),
                                  row2,
                                  _mm256_cmp_ps(row2, vzero, _CMP_GT_OS));
          row3 = _mm256_blendv_ps(_mm256_mul_ps(row3, vscale),
                                  row3,
                                  _mm256_cmp_ps(row3, vzero, _CMP_GT_OS));
          row4 = _mm256_blendv_ps(_mm256_mul_ps(row4, vscale),
                                  row4,
                                  _mm256_cmp_ps(row4, vzero, _CMP_GT_OS));
          row5 = _mm256_blendv_ps(_mm256_mul_ps(row5, vscale),
                                  row5,
                                  _mm256_cmp_ps(row5, vzero, _CMP_GT_OS));
          row6 = _mm256_blendv_ps(_mm256_mul_ps(row6, vscale),
                                  row6,
                                  _mm256_cmp_ps(row6, vzero, _CMP_GT_OS));
          row7 = _mm256_blendv_ps(_mm256_mul_ps(row7, vscale),
                                  row7,
                                  _mm256_cmp_ps(row7, vzero, _CMP_GT_OS));
#else
__m128 vzero = _mm_set1_ps(0.f);
__m128 vscale = _mm_set1_ps(act_param.Leaky_relu_alpha);
row0 = _mm_blendv_ps(_mm_mul_ps(row0, vscale),
row0,
_mm_cmp_ps(row0, vzero, _CMP_GT_OS));
          row1 = _mm_blendv_ps(_mm_mul_ps(row1, vscale),
                               row1,
                               _mm_cmp_ps(row1, vzero, _CMP_GT_OS));
          row2 = _mm_blendv_ps(_mm_mul_ps(row2, vscale),
                               row2,
                               _mm_cmp_ps(row2, vzero, _CMP_GT_OS));
          row3 = _mm_blendv_ps(_mm_mul_ps(row3, vscale),
                               row3,
                               _mm_cmp_ps(row3, vzero, _CMP_GT_OS));
#endif
} else if (active_type == lite_api::ActivationType::kHardSwish) {
#ifdef __AVX__
__m256 vzero = _mm256_set1_ps(0.f);
__m256 voffset = _mm256_set1_ps(act_param.hard_swish_offset);
__m256 vscale = _mm256_set1_ps(1.0 / act_param.hard_swish_scale);
__m256 vthreshold = _mm256_set1_ps(act_param.hard_swish_threshold);
row0 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row0, voffset), vzero)),
_mm256_mul_ps(row0, vscale));
row1 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row1, voffset), vzero)),
_mm256_mul_ps(row1, vscale));
row2 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row2, voffset), vzero)),
_mm256_mul_ps(row2, vscale));
row3 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row3, voffset), vzero)),
_mm256_mul_ps(row3, vscale));
row4 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row4, voffset), vzero)),
_mm256_mul_ps(row4, vscale));
row5 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row5, voffset), vzero)),
_mm256_mul_ps(row5, vscale));
row6 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row6, voffset), vzero)),
_mm256_mul_ps(row6, vscale));
row7 = _mm256_mul_ps(
_mm256_min_ps(
vthreshold,
_mm256_max_ps(_mm256_add_ps(row7, voffset), vzero)),
_mm256_mul_ps(row7, vscale));
#else
__m128 vzero = _mm_set1_ps(0.f);
          __m128 voffset = _mm_set1_ps(act_param.hard_swish_offset);
          __m128 vscale = _mm_set1_ps(1.0 / act_param.hard_swish_scale);
          __m128 vthreshold = _mm_set1_ps(act_param.hard_swish_threshold);
          row0 = _mm_mul_ps(
              _mm_min_ps(vthreshold,
                         _mm_max_ps(_mm_add_ps(row0, voffset), vzero)),
              _mm_mul_ps(row0, vscale));
          row1 = _mm_mul_ps(
              _mm_min_ps(vthreshold,
                         _mm_max_ps(_mm_add_ps(row1, voffset), vzero)),
              _mm_mul_ps(row1, vscale));
          row2 = _mm_mul_ps(
              _mm_min_ps(vthreshold,
                         _mm_max_ps(_mm_add_ps(row2, voffset), vzero)),
              _mm_mul_ps(row2, vscale));
          row3 = _mm_mul_ps(
              _mm_min_ps(vthreshold,
                         _mm_max_ps(_mm_add_ps(row3, voffset), vzero)),
              _mm_mul_ps(row3, vscale));
#endif
} else if (active_type == lite_api::ActivationType::kIndentity) {
} else {
LOG(FATAL) << "[X86] unsupported Activation type";
}
float* dst_address =
o_data + bs_i * ochw + oc_gi * ohw + oh_i * ow + ow_i;
#ifdef __AVX__
_mm256_storeu_ps(dst_address + 0 * ohw, row0);
_mm256_storeu_ps(dst_address + 1 * ohw, row1);
_mm256_storeu_ps(dst_address + 2 * ohw, row2);
_mm256_storeu_ps(dst_address + 3 * ohw, row3);
_mm256_storeu_ps(dst_address + 4 * ohw, row4);
_mm256_storeu_ps(dst_address + 5 * ohw, row5);
_mm256_storeu_ps(dst_address + 6 * ohw, row6);
_mm256_storeu_ps(dst_address + 7 * ohw, row7);
#else
_mm_storeu_ps(dst_address + 0 * ohw, row0);
_mm_storeu_ps(dst_address + 1 * ohw, row1);
_mm_storeu_ps(dst_address + 2 * ohw, row2);
_mm_storeu_ps(dst_address + 3 * ohw, row3);
#endif
}
for (int ow_i = ow / BLOCK * BLOCK; ow_i < ow; ow_i++) {
// trans_out
float* from_address =
trans_out + oc_gi * ohw + oh_i * owB + ow_i * BLOCK;
float* dst_address =
o_data + bs_i * ochw + oc_gi * ohw + oh_i * ow + ow_i;
#ifdef __AVX__
__m256 row = _mm256_loadu_ps(from_address);
#else
__m128 row = _mm_loadu_ps(from_address);
#endif
if (bias != nullptr) {
#ifdef __AVX__
row = _mm256_add_ps(row, _mm256_loadu_ps(&bias[oc_gi]));
#else
row = _mm_add_ps(row, _mm_loadu_ps(&bias[oc_gi]));
#endif
}
if (active_type == lite_api::ActivationType::kRelu) {
#ifdef __AVX__
row = _mm256_max_ps(row, _mm256_set1_ps(0.f));
#else
row = _mm_max_ps(row, _mm_set1_ps(0.f));
#endif
} else if (active_type == lite_api::ActivationType::kRelu6) {
#ifdef __AVX__
row = _mm256_max_ps(row, _mm256_set1_ps(0.f));
row =
_mm256_min_ps(row, _mm256_set1_ps(act_param.Relu_clipped_coef));
#else
row = _mm_max_ps(row, _mm_set1_ps(0.f));
          row = _mm_min_ps(row, _mm_set1_ps(act_param.Relu_clipped_coef));
#endif
} else if (active_type == lite_api::ActivationType::kLeakyRelu) {
#ifdef __AVX__
__m256 val_scale =
_mm256_mul_ps(row, _mm256_set1_ps(act_param.Leaky_relu_alpha));
row = _mm256_blendv_ps(
val_scale,
row,
_mm256_cmp_ps(row, _mm256_setzero_ps(), _CMP_GT_OS));
#else
__m128 val_scale =
_mm_mul_ps(row, _mm_set1_ps(act_param.Leaky_relu_alpha));
row = _mm_blendv_ps(
val_scale, row, _mm_cmp_ps(row, _mm_setzero_ps(), _CMP_GT_OS));
#endif
} else if (active_type == lite_api::ActivationType::kHardSwish) {
#ifdef __AVX__
__m256 val_offset =
_mm256_add_ps(row, _mm256_set1_ps(act_param.hard_swish_offset));
__m256 val_scale = _mm256_mul_ps(
row, _mm256_set1_ps(1.0 / act_param.hard_swish_scale));
__m256 val =
_mm256_min_ps(_mm256_set1_ps(act_param.hard_swish_threshold),
_mm256_max_ps(val_offset, _mm256_setzero_ps()));
row = _mm256_mul_ps(val, val_scale);
#else
__m128 val_offset =
_mm_add_ps(row, _mm_set1_ps(act_param.hard_swish_offset));
__m128 val_scale =
_mm_mul_ps(row, _mm_set1_ps(1.0 / act_param.hard_swish_scale));
          __m128 val = _mm_min_ps(_mm_set1_ps(act_param.hard_swish_threshold),
                                  _mm_max_ps(val_offset, _mm_setzero_ps()));
row = _mm_mul_ps(val, val_scale);
#endif
} else if (active_type == lite_api::ActivationType::kIndentity) {
} else {
LOG(FATAL) << "[X86] unsupported Activation type";
}
#ifdef __AVX__
*(dst_address + 0 * oh * ow) = (reinterpret_cast<float*>(&row))[0];
*(dst_address + 1 * oh * ow) = (reinterpret_cast<float*>(&row))[1];
*(dst_address + 2 * oh * ow) = (reinterpret_cast<float*>(&row))[2];
*(dst_address + 3 * oh * ow) = (reinterpret_cast<float*>(&row))[3];
*(dst_address + 4 * oh * ow) = (reinterpret_cast<float*>(&row))[4];
*(dst_address + 5 * oh * ow) = (reinterpret_cast<float*>(&row))[5];
*(dst_address + 6 * oh * ow) = (reinterpret_cast<float*>(&row))[6];
*(dst_address + 7 * oh * ow) = (reinterpret_cast<float*>(&row))[7];
#else
*(dst_address + 0 * oh * ow) = (reinterpret_cast<float*>(&row))[0];
*(dst_address + 1 * oh * ow) = (reinterpret_cast<float*>(&row))[1];
*(dst_address + 2 * oh * ow) = (reinterpret_cast<float*>(&row))[2];
*(dst_address + 3 * oh * ow) = (reinterpret_cast<float*>(&row))[3];
#endif
}
}
}
}
TargetFree(TARGET(kX86), trans_out);
}
} // namespace math
} // namespace x86
} // namespace lite
} // namespace paddle
| 25,788 |
312 | <reponame>nnnunnn/occa
#ifndef OCCA_MODES_DPCPP_STREAM_HEADER
#define OCCA_MODES_DPCPP_STREAM_HEADER
#include <occa/internal/core/stream.hpp>
#include <occa/internal/modes/dpcpp/polyfill.hpp>
namespace occa {
namespace dpcpp {
class streamTag;
class stream : public occa::modeStream_t {
public:
::sycl::queue commandQueue;
stream(modeDevice_t *modeDevice_,
const occa::json &properties_,
::sycl::queue commandQueue_);
virtual ~stream()=default;
void finish();
occa::dpcpp::streamTag memcpy(void *dest, const void *src, occa::udim_t num_bytes);
};
}
}
#endif
| 290 |
530 | package org.carlspring.strongbox.client;
/**
* This exception is thrown during the resolution or deployment of artifacts.
*
* @author mtodorov
*/
public class ArtifactTransportException
extends Exception
{
public ArtifactTransportException()
{
}
public ArtifactTransportException(String message)
{
super(message);
}
public ArtifactTransportException(String message,
Throwable cause)
{
super(message, cause);
}
public ArtifactTransportException(Throwable cause)
{
super(cause);
}
public ArtifactTransportException(String message,
Throwable cause,
boolean enableSuppression,
boolean writableStackTrace)
{
super(message, cause, enableSuppression, writableStackTrace);
}
}
| 410 |
1,602 | <filename>third-party/qthread/qthread-src/src/queue.c
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "qthread/qthread.h"
#include "qt_alloc.h"
#include "qt_mpool.h"
#include "qt_asserts.h"
#include "qt_debug.h"
#include "qt_threadstate.h"
#include "qt_qthread_mgmt.h" /* for qthread_internal_self() */
#include "qt_qthread_struct.h" /* to pass data back to worker */
#include "qt_visibility.h"
#ifndef UNPOOLED
# include "qt_subsystems.h" /* for qthread_internal_cleanup() */
#endif
#include "qthread_innards.h" /* for qlib */
#include "qt_queue.h"
/* Memory Management */
#ifdef UNPOOLED
# define ALLOC_TQNODE() (qthread_queue_node_t *)MALLOC(sizeof(qthread_queue_node_t))
# define FREE_TQNODE(n) FREE((n), sizeof(qthread_queue_node_t))
void INTERNAL qthread_queue_subsystem_init(void) {}
#else
static qt_mpool node_pool = NULL;
# define ALLOC_TQNODE() (qthread_queue_node_t *)qt_mpool_alloc(node_pool)
# define FREE_TQNODE(n) qt_mpool_free(node_pool, (n))
static void qthread_queue_subsystem_shutdown(void)
{
qt_mpool_destroy(node_pool);
}
void INTERNAL qthread_queue_subsystem_init(void)
{
node_pool = qt_mpool_create(sizeof(qthread_queue_node_t));
qthread_internal_cleanup(qthread_queue_subsystem_shutdown);
}
#endif /* if defined(UNPOOLED_QUEUES) || defined(UNPOOLED) */
qthread_queue_t API_FUNC qthread_queue_create(uint8_t flags,
aligned_t length)
{
qthread_queue_t q = qt_calloc(1, sizeof(struct qthread_queue_s));
assert(q);
if (flags & QTHREAD_QUEUE_MULTI_JOIN) {
q->type = NEMESIS;
} else if (flags & QTHREAD_QUEUE_MULTI_JOIN_LENGTH) {
q->type = NEMESIS_LENGTH;
} else if (flags & QTHREAD_QUEUE_CAPPED) {
q->type = CAPPED;
q->q.capped.maxmembers = (aligned_t)length;
q->q.capped.membercount = 0;
q->q.capped.busy = 0;
q->q.capped.members = MALLOC(sizeof(qthread_t *) * length);
assert(q->q.capped.members);
} else {
q->type = NOSYNC;
}
return q;
}
aligned_t API_FUNC qthread_queue_length(qthread_queue_t q)
{
assert(q);
switch(q->type) {
case NEMESIS_LENGTH:
return q->q.nemesis.length;
case CAPPED:
return q->q.capped.membercount;
default:
return 0;
}
}
int API_FUNC qthread_queue_join(qthread_queue_t q)
{
assert(q);
qthread_t *me = qthread_internal_self();
me->thread_state = QTHREAD_STATE_QUEUE;
me->rdata->blockedon.queue = q;
qthread_back_to_master(me);
return QTHREAD_SUCCESS;
}
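/* Illustrative usage sketch (the calling context and ordering below are
 * assumptions, not taken from this file):
 *
 *   qthread_queue_t q = qthread_queue_create(QTHREAD_QUEUE_MULTI_JOIN, 0);
 *   // ...qthreads that must wait call:
 *   //   qthread_queue_join(q);        // parks the calling qthread on q
 *   // ...and whoever controls the queue later calls:
 *   //   qthread_queue_release_all(q); // re-enqueues every parked qthread as runnable
 *   //   qthread_queue_destroy(q);
 */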
void INTERNAL qthread_queue_internal_enqueue(qthread_queue_t q,
qthread_t *t)
{
switch(q->type) {
case NOSYNC:
qthread_queue_internal_nosync_enqueue(&q->q.nosync, t);
break;
case NEMESIS:
qthread_queue_internal_NEMESIS_enqueue(&q->q.nemesis, t);
break;
case NEMESIS_LENGTH:
qthread_queue_internal_NEMESIS_enqueue(&q->q.nemesis, t);
qthread_incr(&q->q.nemesis.length, 1);
break;
case CAPPED:
qthread_queue_internal_capped_enqueue(&q->q.capped, t);
break;
case MTS:
QTHREAD_TRAP();
}
}
static void qthread_queue_internal_launch(qthread_t *t,
qthread_shepherd_t *cur_shep)
{
assert(t);
assert(cur_shep);
t->thread_state = QTHREAD_STATE_RUNNING;
if ((t->flags & QTHREAD_UNSTEALABLE) && (t->rdata->shepherd_ptr != cur_shep)) {
qthread_debug(FEB_DETAILS, "qthread(%p:%i) enqueueing in target_shep's ready queue (%p:%i)\n", t, (int)t->thread_id, t->rdata->shepherd_ptr, (int)t->rdata->shepherd_ptr->shepherd_id);
qt_threadqueue_enqueue(t->rdata->shepherd_ptr->ready, t);
} else
#ifdef QTHREAD_USE_SPAWNCACHE
if (!qt_spawncache_spawn(t, cur_shep->ready))
#endif
{
qthread_debug(FEB_DETAILS, "qthread(%p:%i) enqueueing in cur_shep's ready queue (%p:%i)\n", t, (int)t->thread_id, cur_shep, (int)cur_shep->shepherd_id);
qt_threadqueue_enqueue(cur_shep->ready, t);
}
}
int API_FUNC qthread_queue_release_one(qthread_queue_t q)
{
assert(q);
qthread_t *t;
switch(q->type) {
case NOSYNC:
t = qthread_queue_internal_nosync_dequeue(&q->q.nosync);
break;
case NEMESIS:
t = qthread_queue_internal_NEMESIS_dequeue(&q->q.nemesis);
break;
case NEMESIS_LENGTH:
t = qthread_queue_internal_NEMESIS_dequeue(&q->q.nemesis);
qthread_incr(&q->q.nemesis.length, -1);
break;
case CAPPED:
t = qthread_queue_internal_capped_dequeue(&q->q.capped);
break;
default:
QTHREAD_TRAP();
}
qthread_shepherd_id_t destination = t->target_shepherd;
if (destination == NO_SHEPHERD) {
qthread_queue_internal_launch(t, qthread_internal_getshep());
} else {
qthread_queue_internal_launch(t, &qlib->shepherds[destination]);
}
return QTHREAD_SUCCESS;
}
int API_FUNC qthread_queue_release_all(qthread_queue_t q)
{
assert(q);
qthread_t *t;
qthread_shepherd_t *shep = qthread_internal_getshep();
qthread_debug(FEB_DETAILS, "releasing all members of queue %p\n", q);
switch(q->type) {
case NOSYNC:
while ((t = qthread_queue_internal_nosync_dequeue(&q->q.nosync)) != NULL) {
qthread_queue_internal_launch(t, shep);
}
break;
case NEMESIS:
while ((t = qthread_queue_internal_NEMESIS_dequeue(&q->q.nemesis)) != NULL) {
qthread_queue_internal_launch(t, shep);
}
break;
case NEMESIS_LENGTH:
{
const aligned_t count = q->q.nemesis.length;
for (aligned_t c = 0; c < count; c++) {
t = qthread_queue_internal_NEMESIS_dequeue(&q->q.nemesis);
assert(t);
if (t) { qthread_queue_internal_launch(t, shep); }
}
qthread_incr(&q->q.nemesis.length, -count);
break;
}
case CAPPED:
{
const size_t membercount = q->q.capped.membercount;
qthread_t **members_copy = MALLOC(sizeof(qthread_t *) * membercount);
assert(members_copy);
while (q->q.capped.busy != 0) SPINLOCK_BODY();
memcpy(members_copy, q->q.capped.members, sizeof(qthread_t *) * membercount);
memset(q->q.capped.members, 0, sizeof(qthread_t *) * membercount);
if (membercount == q->q.capped.maxmembers) {
q->q.capped.membercount = 0;
}
            for (size_t c = 0; c < membercount; c++) {
if (members_copy[c] != NULL) { qthread_queue_internal_launch(members_copy[c], shep); }
}
qt_free(members_copy);
break;
}
default:
QTHREAD_TRAP();
}
return QTHREAD_SUCCESS;
}
int API_FUNC qthread_queue_destroy(qthread_queue_t q)
{
assert(q);
switch(q->type) {
case NOSYNC:
case NEMESIS:
case NEMESIS_LENGTH:
break;
case CAPPED:
FREE(q->q.capped.members, sizeof(qthread_t *) * q->q.capped.maxmembers);
break;
default:
QTHREAD_TRAP();
}
FREE(q, sizeof(struct qthread_queue_s));
return QTHREAD_SUCCESS;
}
void INTERNAL qthread_queue_internal_nosync_enqueue(qthread_queue_nosync_t *q,
qthread_t *t)
{
qthread_queue_node_t *node = ALLOC_TQNODE();
assert(node);
assert(q);
assert(t);
node->thread = t;
node->next = NULL;
if (q->tail == NULL) {
q->head = node;
} else {
q->tail->next = node;
}
q->tail = node;
}
qthread_t INTERNAL *qthread_queue_internal_nosync_dequeue(qthread_queue_nosync_t *q)
{
qthread_queue_node_t *node;
qthread_t *t = NULL;
assert(q);
node = q->head;
if (node) {
q->head = node->next;
t = node->thread;
FREE_TQNODE(node);
}
return t;
}
void INTERNAL qthread_queue_internal_NEMESIS_enqueue(qthread_queue_NEMESIS_t *q,
qthread_t *t)
{
qthread_queue_node_t *node, *prev;
node = ALLOC_TQNODE();
assert(node != NULL);
node->thread = t;
node->next = NULL;
prev = qt_internal_atomic_swap_ptr((void **)&(q->tail), node);
if (prev == NULL) {
q->head = node;
} else {
prev->next = node;
}
}
qthread_t INTERNAL *qthread_queue_internal_NEMESIS_dequeue(qthread_queue_NEMESIS_t *q)
{
if (!q->shadow_head) {
if (!q->head) {
return NULL;
}
q->shadow_head = q->head;
q->head = NULL;
}
qthread_queue_node_t *const dequeued = q->shadow_head;
if (dequeued != NULL) {
if (dequeued->next != NULL) {
q->shadow_head = dequeued->next;
dequeued->next = NULL;
} else {
qthread_queue_node_t *old;
q->shadow_head = NULL;
old = qthread_cas_ptr(&(q->tail), dequeued, NULL);
if (old != dequeued) {
while (dequeued->next == NULL) SPINLOCK_BODY();
q->shadow_head = dequeued->next;
dequeued->next = NULL;
}
}
qthread_t *retval = dequeued->thread;
FREE_TQNODE(dequeued);
return retval;
} else {
return NULL;
}
}
void INTERNAL qthread_queue_internal_capped_enqueue(qthread_queue_capped_t *q,
qthread_t *t)
{
aligned_t offset;
assert(q);
assert(t);
if (q->membercount >= q->maxmembers) { return; }
qthread_incr(&q->busy, 1);
offset = qthread_incr(&q->membercount, 1);
    qassert_retvoid(offset < q->maxmembers);
q->members[offset] = t;
qthread_incr(&q->busy, -1);
}
qthread_t INTERNAL *qthread_queue_internal_capped_dequeue(qthread_queue_capped_t *q)
{
assert(q);
qthread_t *t = NULL;
size_t i = 0;
while (q->busy != 0) SPINLOCK_BODY();
for (; i < q->membercount && q->members[i] == NULL; i++) ;
if (i < q->membercount) {
assert(q->members[i]);
t = q->members[i];
q->members[i] = NULL;
}
return t;
}
/* vim:set expandtab: */
| 5,572 |
390 | <gh_stars>100-1000
/* Copyright 2020-present Cornell University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* <NAME> (<EMAIL>)
*
*/
#include "psa_random.h"
#include <bm/bm_sim/logger.h>
#include <random>
#include <thread>
#include <iostream>
namespace bm {
namespace psa {
void
PSA_Random::init() {
min_val = min.get_uint64();
max_val = max.get_uint64();
_BM_ASSERT((max_val > min_val) && "[Error] Random number range must be positive.");
/* Note: Even though PSA spec mentioned range should be a power of 2 for
* max portability, bmv2 does not impose this restriction.
*/
}
void
PSA_Random::read(Data &value) {
using engine = std::default_random_engine;
using hash = std::hash<std::thread::id>;
static thread_local engine generator(hash()(std::this_thread::get_id()));
using distrib64 = std::uniform_int_distribution<uint64_t>;
distrib64 distribution(min_val, max_val);
value.set(distribution(generator));
}
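// Rough P4-16 usage this extern is intended to back (PSA-style declaration is
// assumed here, not defined in this file):
//   Random<bit<16>>(16w0, 16w1023) rng;
//   bit<16> jitter = rng.read();
// bmv2 receives min/max as Data attributes and fills `value` in read() above.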
BM_REGISTER_EXTERN_W_NAME(Random, PSA_Random);
BM_REGISTER_EXTERN_W_NAME_METHOD(Random, PSA_Random, read, Data &);
} // namespace bm::psa
} // namespace bm
int import_random(){
return 0;
}
| 546 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef __FRAMEWORK_CLASSES_WILDCARD_HXX_
#define __FRAMEWORK_CLASSES_WILDCARD_HXX_
//_________________________________________________________________________________________________________________
// my own includes
//_________________________________________________________________________________________________________________
#include <macros/debug.hxx>
//_________________________________________________________________________________________________________________
// interface includes
//_________________________________________________________________________________________________________________
//_________________________________________________________________________________________________________________
// other includes
//_________________________________________________________________________________________________________________
#include <rtl/ustring.hxx>
//_________________________________________________________________________________________________________________
// const
//_________________________________________________________________________________________________________________
//_________________________________________________________________________________________________________________
// namespace
//_________________________________________________________________________________________________________________
namespace framework{
//_________________________________________________________________________________________________________________
// declarations
//_________________________________________________________________________________________________________________
/*-************************************************************************************************************//**
@short implement wildcard-mechanism for unicode
@descr This class can be used to get information about the matching of a pattern to a given text.
It's suitable for 8-Bit- AND 16-Bit-strings!
@implements -
@base -
        @ATTENTION      This class isn't threadsafe!
@devstatus deprecated
*//*-*************************************************************************************************************/
class Wildcard
{
//-------------------------------------------------------------------------------------------------------------
// public methods
//-------------------------------------------------------------------------------------------------------------
public:
//---------------------------------------------------------------------------------------------------------
// constructor / destructor
//---------------------------------------------------------------------------------------------------------
/*-****************************************************************************************************//**
@short standard ctor
@descr We do nothing here.
@seealso -
@param -
@return -
@onerror -
*//*-*****************************************************************************************************/
Wildcard();
/*-****************************************************************************************************//**
@short standard dtor
@descr We do nothing here.
@seealso -
@param -
@return -
@onerror -
*//*-*****************************************************************************************************/
virtual ~Wildcard();
//---------------------------------------------------------------------------------------------------------
// interface
//---------------------------------------------------------------------------------------------------------
/*-****************************************************************************************************//**
@short try to find an agreement between given text and searchpattern
@descr You can use wildcards in pattern only!
@seealso -
@param "sText" is the text, in which we search given pattern.
            @param      "sPattern" is the searched pattern which includes wildcards.
@return sal_True , if pattern was found.
@return sal_False, if pattern don't match the text.
@onerror -
*//*-*****************************************************************************************************/
static sal_Bool match( const ::rtl::OUString& sText ,
const ::rtl::OUString& sPattern );
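        /*
            Illustrative call (assumed wildcard semantics: '*' matches any sequence,
            '?' matches a single character; not normative for this implementation):
                Wildcard::match( ::rtl::OUString::createFromAscii("readme.txt"),
                                 ::rtl::OUString::createFromAscii("*.t?t") )
            would be expected to return sal_True.
        */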
//---------------------------------------------------------------------------------------------------------
// debug and test methods
//---------------------------------------------------------------------------------------------------------
/*-****************************************************************************************************//**
            @short      debug-methods to check incoming parameters of some other methods of this class
@descr The follow methods are used to check parameters for other methods
of this class. The return value is used directly for an ASSERT(...).
This mechanism is active in debug version only!
@seealso FRAMEWORK_ASSERT in implementation!
@param references to checking variables
@return sal_False on invalid parameter
@return sal_True otherwise
@onerror -
*//*-*****************************************************************************************************/
#ifdef ENABLE_ASSERTIONS
static sal_Bool impldbg_checkParameter_match( const ::rtl::OUString& sText ,
const ::rtl::OUString& sPattern );
#endif // #ifdef ENABLE_ASSERTIONS
/*-****************************************************************************************************//**
@short test implementation of match() with different examples
@descr If TESTMODE is activated, you can call this method to run and log some special examples.
Do this if you have changed the implementation of method match() to test it.
@seealso -
@param -
@return -
@onerror Error conditions are written to a file or shown in a messagebox.
That depends on the current setting of ASSERT_OUTPUTTYPE. (see debug.hxx for further information.)
*//*-*****************************************************************************************************/
#ifdef ENABLE_CLASSDEBUG
void impldbg_testWildcard();
#endif // #ifdef ENABLE_CLASSDEBUG
}; // class Wildcard
} // namespace framework
#endif // #ifndef __FRAMEWORK_CLASSES_WILDCARD_HXX_
| 1,556 |
493 | <gh_stars>100-1000
package com.jdh.microcraft.gui.mainmenu;
import com.jdh.microcraft.Global;
import com.jdh.microcraft.gfx.Font;
import com.jdh.microcraft.gfx.Renderer;
import com.jdh.microcraft.gui.DialogMenu;
public class AboutMenu extends DialogMenu {
public AboutMenu() {
super(
"ABOUT",
new String [] {
"",
Font.Colors.YELLOW + "MINICRAFT " + Font.Colors.WHITE + "MADE BY " + Font.Colors.YELLOW + "NOTCH",
"FOR " + Font.Colors.BLUE + "LUDUM DARE 22" + Font.Colors.WHITE + " IN 2011.",
"",
"",
Font.Colors.GREEN + "MICROCRAFT" + Font.Colors.WHITE + " REMAKE BY " + Font.Colors.GREEN + "JDH",
"FOR " + Font.Colors.ORANGE + "FUN" + Font.Colors.WHITE + " IN 2020.",
"",
Font.Colors.GREY + "GITHUB.COM/JDAH",
Font.Colors.GREY + "YOUTUBE.COM/C/JDHVIDEO"
},
Renderer.WIDTH / 8, Renderer.HEIGHT / 8,
() -> Global.mainMenu.menu = Global.mainMenu.mainMenu
);
}
@Override
protected boolean shouldCenterText() {
return true;
}
}
| 617 |
47,390 | <filename>src/textord/tablerecog.h<gh_stars>1000+
///////////////////////////////////////////////////////////////////////
// File: tablerecog.h
// Description: Functions to detect structure of tables.
// Author: <NAME>
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TABLERECOG_H_
#define TABLERECOG_H_
#include "colpartitiongrid.h"
namespace tesseract {
// There are 2 classes in this file. They have 2 different purposes.
// - StructuredTable contains the methods to find the structure given
// a specific bounding box and grow that structure.
// - TableRecognizer contains the methods to adjust the possible positions
// of a table without worrying about structure.
//
// To use these classes, the assumption is that the TableFinder will
// have a guess of the location of a table (or possibly over/undersegmented
// tables). The TableRecognizer is responsible for finding the table boundaries
// at a high level. The StructuredTable class is responsible for determining
// the structure of the table and trying to maximize its bounds while retaining
// the structure.
// (The latter part is not implemented yet, but that was the goal).
//
// While on the boundary discussion, keep in mind that this is a first pass.
// There should eventually be some things like internal structure checks,
// and, more importantly, surrounding text flow checks.
//
// Usage:
// The StructuredTable class contains methods to query a potential table.
// It has functions to find structure, count rows, find ColPartitions that
// intersect gridlines, etc. It is not meant to blindly find a table. It
// is meant to start with a known table location and enhance it.
// Usage:
// ColPartitionGrid text_grid, line_grid; // init
// TBOX table_box; // known location of table location
//
// StructuredTable table;
// table.Init(); // construction code
// table.set_text_grid(/* text */); // These 2 grids can be the same!
// table.set_line_grid(/* lines */);
// table.set_min_text_height(10); // Filter vertical and tall text.
// // IMPORTANT! The table needs to be told where it is!
// table.set_bounding_box(table_box); // Set initial table location.
// if (table.FindWhitespacedStructure()) {
// // process table
// table.column_count(); // number of columns
// table.row_count(); // number of rows
// table.cells_count(); // number of cells
// table.bounding_box(); // updated bounding box
// // etc.
// }
//
class TESS_API StructuredTable {
public:
StructuredTable();
~StructuredTable() = default;
// Initialization code. Must be called after the constructor.
void Init();
// Sets the grids used by the table. These can be changed between
// calls to Recognize. They are treated as read-only data.
void set_text_grid(ColPartitionGrid *text);
void set_line_grid(ColPartitionGrid *lines);
// Filters text partitions that are ridiculously tall to prevent
// merging rows.
void set_max_text_height(int height);
// Basic accessors. Some are treated as attributes despite having indirect
// representation.
bool is_lined() const;
unsigned row_count() const;
unsigned column_count() const;
unsigned cell_count() const;
void set_bounding_box(const TBOX &box);
const TBOX &bounding_box() const;
int median_cell_height();
int median_cell_width();
int row_height(unsigned row) const;
int column_width(unsigned column) const;
int space_above() const;
int space_below() const;
// Given enough horizontal and vertical lines in a region, create this table
// based on the structure given by the lines. Return true if it worked out.
// Code assumes the lines exist. It is the caller's responsibility to check
// for lines and find an appropriate bounding box.
bool FindLinedStructure();
// The main subroutine for finding generic table structure. The function
// finds the grid structure in the given box. Returns true if a good grid
// exists, implying that "this" table is valid.
bool FindWhitespacedStructure();
////////
//////// Functions to query table info.
////////
// Returns true if inserting part into the table does not cause any
// cell merges.
bool DoesPartitionFit(const ColPartition &part) const;
// Checks if a sub-table has multiple data cells filled.
int CountFilledCells();
int CountFilledCellsInRow(int row);
int CountFilledCellsInColumn(int column);
int CountFilledCells(unsigned row_start, unsigned row_end, unsigned column_start, unsigned column_end);
// Makes sure that at least one cell in a row has substantial area filled.
// This can filter out large whitespace caused by growing tables too far
// and page numbers.
// (currently bugged for some reason).
bool VerifyRowFilled(int row);
// Finds the filled area in a cell.
double CalculateCellFilledPercentage(unsigned row, unsigned column);
// Debug display, draws the table in the given color. If the table is not
// valid, the table and "best" grid lines are still drawn in the given color.
void Display(ScrollView *window, ScrollView::Color color);
protected:
// Clear the structure information.
void ClearStructure();
////////
//////// Lined tables
////////
// Verifies the lines do not intersect partitions. This happens when
// the lines are in column boundaries and extend the full page. As a result,
// the grid lines go through column text. The condition is detectable.
bool VerifyLinedTableCells();
////////
//////// Tables with whitespace
////////
// This is the function to change if you want to filter resulting tables
// better. Right now it just checks for a minimum cell count and such.
// You could add things like maximum number of ColPartitions per cell or
// similar.
bool VerifyWhitespacedTable();
// Find the columns of a table using whitespace.
void FindWhitespacedColumns();
// Find the rows of a table using whitespace.
void FindWhitespacedRows();
////////
//////// Functions to provide information about the table.
////////
// Calculates the whitespace around the table using the table boundary and
// the supplied grids (set_text_grid and set_line_grid).
void CalculateMargins();
// Update the table margins with the supplied grid. This is
// only called by calculate margins to use multiple grid sources.
void UpdateMargins(ColPartitionGrid *grid);
int FindVerticalMargin(ColPartitionGrid *grid, int start_x, bool decrease) const;
int FindHorizontalMargin(ColPartitionGrid *grid, int start_y, bool decrease) const;
// Calculates stats on the table, namely the median cell height and width.
void CalculateStats();
////////
//////// Functions to try to "fix" some table errors.
////////
// Given a whitespaced table, this looks for bordering lines that might
// be page layout boxes around the table. It is necessary to get the margins
// correct on the table. If the lines are not joined, the margins will be
// the distance to the line, which is not right.
void AbsorbNearbyLines();
// Nice utility function for finding partition gaps. You feed it a sorted
// list of all of the mins/maxes of the partitions in the table, and it gives
// you the gaps (middle). This works for both vertical and horizontal
// gaps.
//
// If you want to allow slight overlap in the division and the partitions,
// just scale down the partitions before inserting them in the list.
// Likewise, you can force at least some space between partitions.
// This trick is how the horizontal partitions are done (since the page
// skew could make it hard to find splits in the text).
//
// As a result, "0 distance" between closest partitions causes a gap.
// This is not a programmatic assumption. It is intentional and simplifies
// things.
//
// "max_merged" indicates both the minimum number of stacked partitions
// to cause a cell (add 1 to it), and the maximum number of partitions that
// a grid line can intersect. For example, if max_merged is 0, then lines
// are inserted wherever space exists between partitions. If it is 2,
// lines may intersect 2 partitions at most, but you also need at least
// 2 partitions to generate a line.
static void FindCellSplitLocations(const std::vector<int> &min_list,
const std::vector<int> &max_list, int max_merged,
std::vector<int> *locations);
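  // Illustrative example (assumed numbers, based on the description above): for partitions
  // spanning [10,25], [12,30], [40,60] and [80,95], min_list = {10, 12, 40, 80} and
  // max_list = {25, 30, 60, 95}; with max_merged = 0, split locations would be chosen inside
  // the gaps (30..40) and (60..80), roughly at their midpoints.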
////////
//////// Utility function for table queries
////////
// Counts the number of ColPartitions that intersect vertical cell
// division at this x value. Used by VerifyLinedTable.
int CountVerticalIntersections(int x);
int CountHorizontalIntersections(int y);
// Counts how many text partitions are in this box.
int CountPartitions(const TBOX &box);
////////
//////// Data members.
////////
// Input data, used as read only data to make decisions.
ColPartitionGrid *text_grid_; // Text ColPartitions
ColPartitionGrid *line_grid_; // Line ColPartitions
// Table structure.
// bounding box is a convenient external representation.
// cell_x_ and cell_y_ indicate the grid lines.
TBOX bounding_box_; // Bounding box
std::vector<int> cell_x_; // Locations of vertical divisions (sorted)
std::vector<int> cell_y_; // Locations of horizontal divisions (sorted)
bool is_lined_; // Is the table backed up by a line structure
// Table margins, set via CalculateMargins
int space_above_;
int space_below_;
int space_left_;
int space_right_;
int median_cell_height_;
int median_cell_width_;
// Filters, used to prevent awkward partitions from destroying structure.
int max_text_height_;
};
class TESS_API TableRecognizer {
public:
TableRecognizer() = default;
~TableRecognizer() = default;
// Initialization code. Must be called after the constructor.
void Init();
////////
//////// Pre-recognize methods to initial table constraints.
////////
// Sets the grids used by the table. These can be changed between
// calls to Recognize. They are treated as read-only data.
void set_text_grid(ColPartitionGrid *text);
void set_line_grid(ColPartitionGrid *lines);
// Sets some additional constraints on the table.
void set_min_height(int height);
void set_min_width(int width);
// Filters text partitions that are ridiculously tall to prevent
// merging rows. Note that "filters" refers to allowing horizontal
// cells to slice through them on the premise that they were
// merged text rows during previous layout.
void set_max_text_height(int height);
// Given a guess location, the RecognizeTable function will try to find a
// structured grid in the area. On success, it will return a new
// StructuredTable (and assumes you will delete it). Otherwise,
// nullptr is returned.
//
// Keep in mind, this may "overgrow" or "undergrow" the size of guess.
// Ideally, there is a either a one-to-one correspondence between
// the guess and table or no table at all. This is not the best of
// assumptions right now, but was made to try to keep things simple in
// the first pass.
//
// If a line structure is available on the page in the given region,
// the table will use the linear structure as it is.
// Otherwise, it will try to maximize the whitespace around it while keeping
// a grid structure. This is somewhat working.
//
// Since the combination of adjustments can get high, effort was
// originally made to keep the number of adjustments linear in the number
// of partitions. The underlying structure finding code used to be
// much more complex. I don't know how necessary this constraint is anymore.
// The evaluation of a possible table is kept within O(nlogn) in the size of
// the table (where size is the number of partitions in the table).
// As a result, the algorithm is capable of O(n^2 log n). Depending
// on the grid search size, it may be higher.
//
// Last note: it is possible to just try all partition boundaries at a high
// level O(n^4) and do a verification scheme (at least O(nlogn)). If there
// are 200 partitions on a page, this could be too costly. Effort could go
// into pruning the search, but I opted for something quicker. I'm confident
// that the independent adjustments can get similar results and keep the
// complexity down. However, the other approach could work without using
// TableFinder at all if it is fast enough. It comes down to properly
// deciding what is a table. The code currently relies on TableFinder's
// guess to the location of a table for that.
StructuredTable *RecognizeTable(const TBOX &guess_box);
protected:
////////
//////// Lined tables
////////
// Returns true if the given box has a lined table within it. The
// table argument will be updated with the table if the table exists.
bool RecognizeLinedTable(const TBOX &guess_box, StructuredTable *table);
// Returns true if the given box has a large number of horizontal and
// vertical lines present. If so, we assume the extent of these lines
// uniquely defines a table and find that table via SolveLinedTable.
bool HasSignificantLines(const TBOX &guess);
// Given enough horizontal and vertical lines in a region, find a bounding
// box that encloses all of them (as well as newly introduced lines).
// The bounding box is the smallest box that encloses the lines in guess
// without having any lines sticking out of it.
// bounding_box is an in/out parameter.
// On input, it is the extents of the box to search.
// On output, it is the resulting bounding box.
bool FindLinesBoundingBox(TBOX *bounding_box);
// Iteration in above search.
// bounding_box is an in/out parameter.
// On input, it is the extents of the box to search.
// On output, it is the resulting bounding box.
bool FindLinesBoundingBoxIteration(TBOX *bounding_box);
////////
//////// Generic "whitespaced" tables
////////
// Returns true if the given box has a whitespaced table within it. The
// table argument will be updated if the table exists. Also note
// that this method will fail if the guess_box center is not
// mostly within the table.
bool RecognizeWhitespacedTable(const TBOX &guess_box, StructuredTable *table);
// Finds the location of a horizontal split relative to y.
// This function is mostly unused now. If the SolveWhitespacedTable
// changes much, it can be removed. Note, it isn't really as reliable
// as I thought. I went with alternatives for most of the other uses.
int NextHorizontalSplit(int left, int right, int y, bool top_to_bottom);
// Indicates that a table row is weak. This means that it has
// many missing data cells or very large cell heights compared
// to the rest of the table.
static bool IsWeakTableRow(StructuredTable *table, int row);
// Input data, used as read only data to make decisions.
ColPartitionGrid *text_grid_ = nullptr; // Text ColPartitions
ColPartitionGrid *line_grid_ = nullptr; // Line ColPartitions
// Table constraints, a "good" table must satisfy these.
int min_height_ = 0;
int min_width_ = 0;
// Filters, used to prevent awkward partitions from destroying structure.
int max_text_height_ = INT32_MAX; // Horizontal lines may intersect taller text.
};
} // namespace tesseract
#endif /* TABLERECOG_H_ */
| 4,436 |
1,338 | // main.cpp
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <Application.h>
#include <Bitmap.h>
#include <Button.h>
#include <Message.h>
#include <MessageRunner.h>
#include <Messenger.h>
#include <View.h>
#include <Window.h>
#include "bitmap.h"
enum {
MSG_RESET = 'rset',
MSG_TICK = 'tick',
};
#define SPEED 2.0
// random_number_between
float
random_number_between(float v1, float v2)
{
if (v1 < v2)
return v1 + fmod(rand() / 1000.0, (v2 - v1));
else if (v2 < v1)
return v2 + fmod(rand() / 1000.0, (v1 - v2));
return v1;
}
// TestView
class TestView : public BView {
public:
TestView(BRect frame, const char* name,
uint32 resizeFlags, uint32 flags);
virtual void AttachedToWindow();
virtual void MessageReceived(BMessage* message);
virtual void Draw(BRect updateRect);
virtual void MouseDown(BPoint where);
virtual void MouseUp(BPoint where);
virtual void MouseMoved(BPoint where, uint32 transit,
const BMessage* dragMessage);
private:
void _ResetRect();
void _InvalidateBitmapRect(BRect r);
void _DrawCross(BPoint where, rgb_color c);
struct point {
double x;
double y;
double direction_x;
double direction_y;
double velocity_x;
double velocity_y;
};
struct color_cycle {
uint8 value;
double direction;
};
void _FillBitmap(point* polygon);
void _InitPolygon(const BRect& b, point* polygon) const;
void _InitColor(color_cycle* color) const;
void _MorphPolygon(const BRect& b, point* polygon);
void _MorphColor(color_cycle* color);
BBitmap* fBitmap;
BView* fOffscreenView;
BMessageRunner* fTicker;
BRect fBitmapRect;
enum {
TRACKING_NONE = 0,
TRACKING_LEFT,
TRACKING_RIGHT,
TRACKING_TOP,
TRACKING_BOTTOM,
TRACKING_LEFT_TOP,
TRACKING_RIGHT_TOP,
TRACKING_LEFT_BOTTOM,
TRACKING_RIGHT_BOTTOM,
TRACKING_ALL
};
uint32 fTracking;
BPoint fLastMousePos;
point fPolygon[4];
color_cycle fColor[3];
};
// constructor
TestView::TestView(BRect frame, const char* name,
uint32 resizeFlags, uint32 flags)
: BView(frame, name, resizeFlags, flags),
// fBitmap(new BBitmap(BRect(0, 0, kBitmapWidth - 1, kBitmapHeight -1), 0, kBitmapFormat)),
// fBitmap(new BBitmap(BRect(0, 0, 32 - 1, 8 - 1), 0, B_CMAP8)),
// fBitmap(new BBitmap(BRect(0, 0, 32 - 1, 8 - 1), 0, B_GRAY8)),
fBitmap(new BBitmap(BRect(0, 0, 199, 99), B_RGB32, true)),
// fBitmap(new BBitmap(BRect(0, 0, 639, 479), B_RGB32, true)),
// fBitmap(new BBitmap(BRect(0, 0, 639, 479), B_CMAP8, true)),
// fBitmap(new BBitmap(BRect(0, 0, 199, 99), B_CMAP8, true)),
// fBitmap(new BBitmap(BRect(0, 0, 199, 99), B_GRAY8, true)),
fOffscreenView(new BView(fBitmap->Bounds(), "Offscreen view",
B_FOLLOW_ALL, B_WILL_DRAW | B_SUBPIXEL_PRECISE)),
fTicker(NULL),
fBitmapRect(),
fTracking(TRACKING_NONE),
fLastMousePos(-1.0, -1.0)
{
SetViewColor(B_TRANSPARENT_COLOR);
SetLowColor(ui_color(B_PANEL_BACKGROUND_COLOR));
// uint32 size = min_c((uint32)fBitmap->BitsLength(), sizeof(kBitmapBits));
// memcpy(fBitmap->Bits(), kBitmapBits, size);
/* uint8* bits = (uint8*)fBitmap->Bits();
uint32 width = fBitmap->Bounds().IntegerWidth() + 1;
uint32 height = fBitmap->Bounds().IntegerHeight() + 1;
uint32 bpr = fBitmap->BytesPerRow();
printf("width: %ld, height: %ld, bpr: %ld\n", width, height, bpr);
int32 index = 0;
for (uint32 y = 0; y < height; y++) {
uint8* h = bits;
for (uint32 x = 0; x < width; x++) {
*h = index++;
h++;
}
bits += bpr;
}
BRect a(0.0, 10.0, 20.0, 10.0);
BRect b(0.0, 10.0, 10.0, 30.0);
printf("Intersects: %d\n", a.Intersects(b));*/
if (fBitmap->Lock()) {
fBitmap->AddChild(fOffscreenView);
fOffscreenView->SetBlendingMode(B_CONSTANT_ALPHA, B_ALPHA_COMPOSITE);
fBitmap->Unlock();
}
srand((long int)system_time());
_InitPolygon(fBitmap->Bounds(), fPolygon);
_InitColor(fColor);
_ResetRect();
}
// AttachedToWindow
void
TestView::AttachedToWindow()
{
BMessenger mess(this, Window());
BMessage msg(MSG_TICK);
fTicker = new BMessageRunner(mess, &msg, 40000LL);
}
// MessageReceived
void
TestView::MessageReceived(BMessage* message)
{
switch (message->what) {
case MSG_RESET: {
BRect old = fBitmapRect;
_ResetRect();
_InvalidateBitmapRect(old | fBitmapRect);
break;
}
case MSG_TICK:
_MorphPolygon(fBitmap->Bounds(), fPolygon);
_MorphColor(fColor);
_FillBitmap(fPolygon);
Invalidate(fBitmapRect);
break;
default:
BView::MessageReceived(message);
break;
}
}
// Draw
void
TestView::Draw(BRect updateRect)
{
SetDrawingMode(B_OP_ALPHA);
DrawBitmap(fBitmap, fBitmap->Bounds(), fBitmapRect);
SetDrawingMode(B_OP_COPY);
// background around bitmap
BRect topOfBitmap(updateRect.left, updateRect.top, updateRect.right, fBitmapRect.top - 1);
if (topOfBitmap.IsValid())
FillRect(topOfBitmap, B_SOLID_LOW);
BRect leftOfBitmap(updateRect.left, fBitmapRect.top, fBitmapRect.left - 1, fBitmapRect.bottom);
if (leftOfBitmap.IsValid())
FillRect(leftOfBitmap, B_SOLID_LOW);
BRect rightOfBitmap(fBitmapRect.right + 1, fBitmapRect.top, updateRect.right, fBitmapRect.bottom);
if (rightOfBitmap.IsValid())
FillRect(rightOfBitmap, B_SOLID_LOW);
BRect bottomOfBitmap(updateRect.left, fBitmapRect.bottom + 1, updateRect.right, updateRect.bottom);
if (bottomOfBitmap.IsValid())
FillRect(bottomOfBitmap, B_SOLID_LOW);
// indicate the frame to see any errors in the drawing code
rgb_color red = (rgb_color){ 255, 0, 0, 255 };
_DrawCross(fBitmapRect.LeftTop() + BPoint(-1.0, -1.0), red);
_DrawCross(fBitmapRect.RightTop() + BPoint(1.0, -1.0), red);
_DrawCross(fBitmapRect.LeftBottom() + BPoint(-1.0, 1.0), red);
_DrawCross(fBitmapRect.RightBottom() + BPoint(1.0, 1.0), red);
// text
SetDrawingMode(B_OP_ALPHA);
const char* message = "Click and drag to move and resize the bitmap!";
BPoint textPos(20.0, 30.0);
SetHighColor(255, 255, 255, 180);
DrawString(message, textPos);
SetHighColor(0, 0, 0, 180);
DrawString(message, textPos + BPoint(-1.0, -1.0));
}
// hit_test
bool
hit_test(BPoint where, BPoint p)
{
BRect r(p, p);
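// Grow the hit area by 5 pixels in each direction so the corner handle is easier to grab.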
r.InsetBy(-5.0, -5.0);
return r.Contains(where);
}
// hit_test
bool
hit_test(BPoint where, BPoint a, BPoint b)
{
BRect r(a, b);
if (a.x == b.x)
r.InsetBy(-3.0, 0.0);
else
r.InsetBy(0.0, -3.0);
return r.Contains(where);
}
// MouseDown
void
TestView::MouseDown(BPoint where)
{
fTracking = TRACKING_NONE;
// check if we hit a corner
if (hit_test(where, fBitmapRect.LeftTop()))
fTracking = TRACKING_LEFT_TOP;
else if (hit_test(where, fBitmapRect.RightTop()))
fTracking = TRACKING_RIGHT_TOP;
else if (hit_test(where, fBitmapRect.LeftBottom()))
fTracking = TRACKING_LEFT_BOTTOM;
else if (hit_test(where, fBitmapRect.RightBottom()))
fTracking = TRACKING_RIGHT_BOTTOM;
// check if we hit a side
else if (hit_test(where, fBitmapRect.LeftTop(), fBitmapRect.RightTop()))
fTracking = TRACKING_TOP;
else if (hit_test(where, fBitmapRect.LeftTop(), fBitmapRect.LeftBottom()))
fTracking = TRACKING_LEFT;
else if (hit_test(where, fBitmapRect.RightTop(), fBitmapRect.RightBottom()))
fTracking = TRACKING_RIGHT;
else if (hit_test(where, fBitmapRect.LeftBottom(), fBitmapRect.RightBottom()))
fTracking = TRACKING_BOTTOM;
// check if we hit inside the rect
else if (fBitmapRect.Contains(where))
fTracking = TRACKING_ALL;
fLastMousePos = where;
}
// MouseUp
void
TestView::MouseUp(BPoint where)
{
fTracking = TRACKING_NONE;
}
// MouseMoved
void
TestView::MouseMoved(BPoint where, uint32 transit,
const BMessage* dragMessage)
{
if (fTracking > TRACKING_NONE) {
BRect old = fBitmapRect;
BPoint offset = where - fLastMousePos;
switch (fTracking) {
case TRACKING_LEFT_TOP:
fBitmapRect.Set(fBitmapRect.left + offset.x,
fBitmapRect.top + offset.y,
fBitmapRect.right,
fBitmapRect.bottom);
break;
case TRACKING_RIGHT_BOTTOM:
fBitmapRect.Set(fBitmapRect.left,
fBitmapRect.top,
fBitmapRect.right + offset.x,
fBitmapRect.bottom + offset.y);
break;
case TRACKING_LEFT_BOTTOM:
fBitmapRect.Set(fBitmapRect.left + offset.x,
fBitmapRect.top,
fBitmapRect.right,
fBitmapRect.bottom + offset.y);
break;
case TRACKING_RIGHT_TOP:
fBitmapRect.Set(fBitmapRect.left,
fBitmapRect.top + offset.y,
fBitmapRect.right + offset.x,
fBitmapRect.bottom);
break;
case TRACKING_LEFT:
fBitmapRect.Set(fBitmapRect.left + offset.x,
fBitmapRect.top,
fBitmapRect.right,
fBitmapRect.bottom);
break;
case TRACKING_TOP:
fBitmapRect.Set(fBitmapRect.left,
fBitmapRect.top + offset.y,
fBitmapRect.right,
fBitmapRect.bottom);
break;
case TRACKING_RIGHT:
fBitmapRect.Set(fBitmapRect.left,
fBitmapRect.top,
fBitmapRect.right + offset.x,
fBitmapRect.bottom);
break;
case TRACKING_BOTTOM:
fBitmapRect.Set(fBitmapRect.left,
fBitmapRect.top,
fBitmapRect.right,
fBitmapRect.bottom + offset.y);
break;
case TRACKING_ALL:
default:
fBitmapRect.OffsetBy(offset);
break;
}
fLastMousePos = where;
if (old != fBitmapRect)
_InvalidateBitmapRect(old | fBitmapRect);
}
}
// _ResetRect
void
TestView::_ResetRect()
{
fBitmapRect = fBitmap->Bounds();
fBitmapRect.OffsetBy(floorf((Bounds().Width() - fBitmapRect.Width()) / 2.0 + 0.5),
floorf((Bounds().Height() - fBitmapRect.Height()) / 2.0 + 0.5));
}
// _InvalidateBitmapRect
void
TestView::_InvalidateBitmapRect(BRect r)
{
r.InsetBy(-4.0, -4.0);
Invalidate(r);
}
// _DrawCross
void
TestView::_DrawCross(BPoint where, rgb_color c)
{
BeginLineArray(4);
AddLine(BPoint(where.x, where.y - 3),
BPoint(where.x, where.y - 1), c);
AddLine(BPoint(where.x, where.y + 1),
BPoint(where.x, where.y + 3), c);
AddLine(BPoint(where.x - 3, where.y),
BPoint(where.x - 1, where.y), c);
AddLine(BPoint(where.x + 1, where.y),
BPoint(where.x + 3, where.y), c);
EndLineArray();
}
// _FillBitmap
void
TestView::_FillBitmap(point* polygon)
{
if (fBitmap->Lock()) {
fOffscreenView->SetDrawingMode(B_OP_COPY);
fOffscreenView->SetHighColor(0, 0, 0, 30);
fOffscreenView->FillRect(fOffscreenView->Bounds());
fOffscreenView->SetDrawingMode(B_OP_ALPHA);
fOffscreenView->SetHighColor(fColor[0].value,
fColor[1].value,
fColor[2].value,
30);
fOffscreenView->SetPenSize(4);
fOffscreenView->SetLineMode(B_BUTT_CAP, B_ROUND_JOIN);
BPoint pointList[4];
pointList[0].x = polygon[0].x;
pointList[0].y = polygon[0].y;
pointList[1].x = polygon[1].x;
pointList[1].y = polygon[1].y;
pointList[2].x = polygon[2].x;
pointList[2].y = polygon[2].y;
pointList[3].x = polygon[3].x;
pointList[3].y = polygon[3].y;
fOffscreenView->StrokePolygon(pointList, 4);
fOffscreenView->Sync();
fBitmap->Unlock();
}
}
// _InitPolygon
void
TestView::_InitPolygon(const BRect& b, point* polygon) const
{
polygon[0].x = b.left;
polygon[0].y = b.top;
polygon[0].direction_x = random_number_between(-SPEED, SPEED);
polygon[0].direction_y = random_number_between(-SPEED, SPEED);
polygon[0].velocity_x = 0.0;
polygon[0].velocity_y = 0.0;
polygon[1].x = b.right;
polygon[1].y = b.top;
polygon[1].direction_x = random_number_between(-SPEED, SPEED);
polygon[1].direction_y = random_number_between(-SPEED, SPEED);
polygon[1].velocity_x = 0.0;
polygon[1].velocity_y = 0.0;
polygon[2].x = b.right;
polygon[2].y = b.bottom;
polygon[2].direction_x = random_number_between(-SPEED, SPEED);
polygon[2].direction_y = random_number_between(-SPEED, SPEED);
polygon[2].velocity_x = 0.0;
polygon[2].velocity_y = 0.0;
polygon[3].x = b.left;
polygon[3].y = b.bottom;
polygon[3].direction_x = random_number_between(-SPEED, SPEED);
polygon[3].direction_y = random_number_between(-SPEED, SPEED);
polygon[3].velocity_x = 0.0;
polygon[3].velocity_y = 0.0;
}
// _InitColor
void
TestView::_InitColor(color_cycle* color) const
{
color[0].value = 0;
color[0].direction = random_number_between(-SPEED * 4, SPEED * 4);
color[1].value = 0;
color[1].direction = random_number_between(-SPEED * 4, SPEED * 4);
color[2].value = 0;
color[2].direction = random_number_between(-SPEED * 4, SPEED * 4);
}
// morph
inline void
morph(double* value, double* direction, double* velocity, double min, double max)
{
*value += *velocity;
// flip direction if necessary
if (*value < min && *direction < 0.0) {
*direction = -*direction;
} else if (*value > max && *direction > 0.0) {
*direction = -*direction;
}
// accelerate velocity
if (*direction < 0.0) {
if (*velocity > *direction)
*velocity += *direction / 10.0;
// truncate velocity
if (*velocity < *direction)
*velocity = *direction;
} else {
if (*velocity < *direction)
*velocity += *direction / 10.0;
// truncate velocity
if (*velocity > *direction)
*velocity = *direction;
}
}
// morph
inline void
morph(uint8* value, double* direction)
{
int32 v = (int32)(*value + *direction);
if (v < 0) {
v = 0;
*direction = -*direction;
} else if (v > 255) {
v = 255;
*direction = -*direction;
}
*value = (uint8)v;
}
// _MorphPolygon
void
TestView::_MorphPolygon(const BRect& b, point* polygon)
{
morph(&polygon[0].x, &polygon[0].direction_x, &polygon[0].velocity_x, b.left, b.right);
morph(&polygon[1].x, &polygon[1].direction_x, &polygon[1].velocity_x, b.left, b.right);
morph(&polygon[2].x, &polygon[2].direction_x, &polygon[2].velocity_x, b.left, b.right);
morph(&polygon[3].x, &polygon[3].direction_x, &polygon[3].velocity_x, b.left, b.right);
morph(&polygon[0].y, &polygon[0].direction_y, &polygon[0].velocity_y, b.top, b.bottom);
morph(&polygon[1].y, &polygon[1].direction_y, &polygon[1].velocity_y, b.top, b.bottom);
morph(&polygon[2].y, &polygon[2].direction_y, &polygon[2].velocity_y, b.top, b.bottom);
morph(&polygon[3].y, &polygon[3].direction_y, &polygon[3].velocity_y, b.top, b.bottom);
}
// _MorphColor
void
TestView::_MorphColor(color_cycle* color)
{
morph(&color[0].value, &color[0].direction);
morph(&color[1].value, &color[1].direction);
morph(&color[2].value, &color[2].direction);
}
// show_window
void
show_window(BRect frame, const char* name)
{
BWindow* window = new BWindow(frame, name,
B_TITLED_WINDOW,
B_ASYNCHRONOUS_CONTROLS | B_QUIT_ON_WINDOW_CLOSE);
BView* view = new TestView(window->Bounds(), "test", B_FOLLOW_ALL,
B_WILL_DRAW/* | B_FULL_UPDATE_ON_RESIZE*/);
window->AddChild(view);
BRect b(0.0, 0.0, 60.0, 15.0);
b.OffsetTo(5.0, view->Bounds().bottom - (b.Height() + 15.0));
BButton* control = new BButton(b, "button", "Reset", new BMessage(MSG_RESET),
B_FOLLOW_LEFT | B_FOLLOW_BOTTOM);
view->AddChild(control);
control->SetTarget(view);
control->SetViewUIColor(B_PANEL_BACKGROUND_COLOR);
window->Show();
}
// main
int
main(int argc, char** argv)
{
BApplication* app = new BApplication("application/x.vnd-Haiku.BitmapDrawing");
// BRect frame(10.0, 30.0, 790.0, 590.0);
BRect frame(10.0, 30.0, 330.0, 220.0);
show_window(frame, "BitmapDrawing");
app->Run();
delete app;
return 0;
}
| 6,724 |
392 | package com.platform.dao;
import com.platform.entity.GroupBuyingVo;
import java.util.List;
import java.util.Map;
/**
* Dao
*
* @author xuyang
* @email <EMAIL>
* @date 2019-06-13 22:00:12
*/
public interface GroupBuyingMapper extends BaseDao<GroupBuyingVo> {
List<GroupBuyingVo> queryLoseList(Map map);
}
| 120 |
2,112 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _THRIFT_CONCURRENCY_UTIL_H_
#define _THRIFT_CONCURRENCY_UTIL_H_ 1
#include <cstddef>
#include <cstdint>
#include <ctime>
#include <folly/Utility.h>
#include <folly/portability/SysTime.h>
namespace apache {
namespace thrift {
namespace concurrency {
/**
* Utility methods
*
* This class contains basic utility methods for converting time formats,
* and other common platform-dependent concurrency operations.
* It should not be included in API headers for other concurrency library
* headers, since it will, by definition, pull in all sorts of horrid
* platform dependent crap. Rather it should be included directly in
* concurrency library implementation source.
*
* @version $Id:$
*/
class Util {
public:
static const int64_t NS_PER_S = 1000000000LL;
static const int64_t US_PER_S = 1000000LL;
static const int64_t MS_PER_S = 1000LL;
static const int64_t NS_PER_MS = NS_PER_S / MS_PER_S;
static const int64_t NS_PER_US = NS_PER_S / US_PER_S;
static const int64_t US_PER_MS = US_PER_S / MS_PER_S;
/**
* Converts millisecond timestamp into a timespec struct
*
* @param struct timespec& result
* @param time or duration in milliseconds
*/
static void toTimespec(struct timespec& result, int64_t value) {
result.tv_sec = value / MS_PER_S; // ms to s
result.tv_nsec = (value % MS_PER_S) * NS_PER_MS; // ms to ns
}
static void toTicks(
int64_t& result,
int64_t secs,
int64_t oldTicks,
int64_t oldTicksPerSec,
int64_t newTicksPerSec) {
result = secs * newTicksPerSec;
result += oldTicks * newTicksPerSec / oldTicksPerSec;
int64_t oldPerNew = oldTicksPerSec / newTicksPerSec;
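// Round to nearest: if the leftover old ticks amount to at least half of a new tick, bump the result.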
if (oldPerNew && ((oldTicks % oldPerNew) >= (oldPerNew / 2))) {
++result;
}
}
/**
* Converts struct timespec to arbitrary-sized ticks since epoch
*/
static void toTicks(
int64_t& result, const struct timespec& value, int64_t ticksPerSec) {
return toTicks(result, value.tv_sec, value.tv_nsec, NS_PER_S, ticksPerSec);
}
/**
* Converts struct timeval to arbitrary-sized ticks since epoch
*/
static void toTicks(
int64_t& result, const struct timeval& value, int64_t ticksPerSec) {
return toTicks(result, value.tv_sec, value.tv_usec, US_PER_S, ticksPerSec);
}
/**
* Converts struct timespec to milliseconds
*/
static void toMilliseconds(int64_t& result, const struct timespec& value) {
return toTicks(result, value, MS_PER_S);
}
/**
* Converts struct timeval to milliseconds
*/
static void toMilliseconds(int64_t& result, const struct timeval& value) {
return toTicks(result, value, MS_PER_S);
}
/**
* Get current time as a number of arbitrary-size ticks from epoch
*/
static int64_t currentTimeTicks(int64_t ticksPerSec);
/**
* Get current time as milliseconds from epoch
*/
static int64_t currentTime() { return currentTimeTicks(MS_PER_S); }
};
} // namespace concurrency
} // namespace thrift
} // namespace apache
#endif // #ifndef _THRIFT_CONCURRENCY_UTIL_H_
| 1,276 |
575 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_GTK_NAV_BUTTON_PROVIDER_GTK_H_
#define UI_GTK_NAV_BUTTON_PROVIDER_GTK_H_
#include <map>
#include "base/component_export.h"
#include "ui/gfx/image/image_skia.h"
#include "ui/views/controls/button/button.h"
#include "ui/views/linux_ui/nav_button_provider.h"
namespace gtk {
class COMPONENT_EXPORT(GTK) NavButtonProviderGtk
: public views::NavButtonProvider {
public:
NavButtonProviderGtk();
~NavButtonProviderGtk() override;
// views::NavButtonProvider:
void RedrawImages(int top_area_height, bool maximized, bool active) override;
gfx::ImageSkia GetImage(views::NavButtonProvider::FrameButtonDisplayType type,
views::Button::ButtonState state) const override;
gfx::Insets GetNavButtonMargin(
views::NavButtonProvider::FrameButtonDisplayType type) const override;
gfx::Insets GetTopAreaSpacing() const override;
int GetInterNavButtonSpacing() const override;
private:
std::map<views::NavButtonProvider::FrameButtonDisplayType,
gfx::ImageSkia[views::Button::STATE_COUNT]>
button_images_;
std::map<views::NavButtonProvider::FrameButtonDisplayType, gfx::Insets>
button_margins_;
gfx::Insets top_area_spacing_;
int inter_button_spacing_;
};
} // namespace gtk
#endif // UI_GTK_NAV_BUTTON_PROVIDER_GTK_H_
| 533 |
522 | <filename>src/algs/model/gametree/debug/AlphaBetaDebugNode.java<gh_stars>100-1000
package algs.model.gametree.debug;
import algs.debug.Formatter;
import algs.debug.IGraphEntity;
import algs.debug.ISelectFont;
/**
* This node is used when depicting debugging information about an Alpha/Beta
* node in the game tree path finding search.
*
* @author <NAME>
* @version 1.0, 6/15/08
* @since 1.0
*/
public class AlphaBetaDebugNode implements IGraphEntity, ISelectFont {
/** Counter to ensure uniqueness. */
private static int _ctrMaster;
/** Unique id (an incrementing integer) for this node. */
private int _ctr;
/** Alpha (lower bound) value. */
final int alpha;
/** Beta (upper bound) value. */
final int beta;
/** Computed score value. */
int value;
/** Until value is set, it isn't shown. */
boolean set = false;
/**
* Represent a node in the search for a solution in alpha beta.
* @param alpha known lower bound for game tree node
* @param beta known upper bound for game tree node
*/
public AlphaBetaDebugNode (int alpha, int beta) {
this.alpha = alpha;
this.beta = beta;
_ctr = _ctrMaster++;
}
/**
* Retrieve value for node computed so far.
* <p>
* Primarily here for testing
* @return value associated with AlphaBetaNode
*/
public int value() {
return this.value;
}
/**
* Set the value for this node based upon computation.
* <p>
* Once invoked, the 'set' field changes and the value becomes part of
* the visualization.
*
* @param v computed score value for node.
*/
public void value(int v) {
set = true;
this.value = v;
}
/**
* Retrieve the unique identifier for this node.
* @return unique identifier associated with this debug node.
*/
public int counter() { return _ctr; }
/**
* Generate copy of this node.
*
* Useful when visiting all nodes in the game tree and needing to record progress.
* @return copy of {@link AlphaBetaDebugNode}
*/
public AlphaBetaDebugNode copy() {
AlphaBetaDebugNode n = new AlphaBetaDebugNode (alpha, beta);
n.value = value;
n._ctr = _ctr;
return n;
}
/** Reasonable toString() method for debugging. */
public String toString () { return "[" + alpha + "," + beta + "]"; }
/**
* Compute label for Dotty output.
*
* To try to maximize utility, we want to show Alpha Beta in symbol font.
* However, once score has been set, we can't since then "score
*/
public String nodeLabel() {
StringBuilder sb = new StringBuilder();
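// Builds a Dotty record label such as "{a: 3|b: 7| 5}" (illustrative values); the trailing
// score field only appears once value(v) has been called.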
sb.append("{a: "); // in symbol font, this becomes "Greek alpha"
sb.append(Formatter.convert(alpha));
sb.append("|b: "); // in symbol font, this becomes "Greek beta"
sb.append(Formatter.convert(beta));
// no longer show score, since we are in symbol font.
if (set) {
sb.append("| ");
sb.append(Formatter.convert(value));
}
sb.append("}");
return sb.toString();
}
/** To properly draw Alpha/Beta in symbol font. */
public String fontName() {
// When formatting becomes available again
//
// if (Formatter.isSymbol(alpha) || Formatter.isSymbol(beta)) {
// return "Symbol";
// }
return null; // nothing special.
}
/** Default font size to use is ok. */
public int fontSize() {
return 0;
}
}
| 1,105 |
575 | <filename>chrome/browser/ui/webui/tab_strip/tab_before_unload_tracker.cc
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/webui/tab_strip/tab_before_unload_tracker.h"
#include <memory>
#include "content/public/browser/web_contents.h"
#include "content/public/browser/web_contents_observer.h"
namespace tab_strip_ui {
TabBeforeUnloadTracker::TabBeforeUnloadTracker(
TabCloseCancelledCallback cancelled_callback)
: cancelled_callback_(std::move(cancelled_callback)) {}
TabBeforeUnloadTracker::~TabBeforeUnloadTracker() = default;
void TabBeforeUnloadTracker::Observe(content::WebContents* contents) {
observers_[contents] = std::make_unique<TabObserver>(contents, this);
}
void TabBeforeUnloadTracker::Unobserve(content::WebContents* contents) {
observers_.erase(contents);
}
void TabBeforeUnloadTracker::OnBeforeUnloadDialogCancelled(
content::WebContents* contents) {
cancelled_callback_.Run(contents);
}
class TabBeforeUnloadTracker::TabObserver
: public content::WebContentsObserver {
public:
TabObserver(content::WebContents* contents, TabBeforeUnloadTracker* tracker)
: content::WebContentsObserver(contents), tracker_(tracker) {}
~TabObserver() override = default;
// content::WebContentsObserver
void WebContentsDestroyed() override { tracker_->Unobserve(web_contents()); }
void BeforeUnloadDialogCancelled() override {
tracker_->OnBeforeUnloadDialogCancelled(web_contents());
}
private:
TabBeforeUnloadTracker* tracker_;
};
} // namespace tab_strip_ui
| 520 |
369 | // Copyright (c) 2017-2021, Mudita <NAME>.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include "ThreadsModel.hpp"
#include <AppWindow.hpp>
#include <gui/widgets/Icon.hpp>
#include <Image.hpp>
#include <Label.hpp>
#include <ListView.hpp>
#include <Text.hpp>
#include <functional>
#include <string>
namespace gui
{
class MessagesMainWindow : public AppWindow, public app::AsyncCallbackReceiver
{
protected:
Icon *emptyListIcon = nullptr;
std::shared_ptr<ThreadsModel> threadsModel = nullptr;
gui::ListView *list = nullptr;
public:
explicit MessagesMainWindow(app::ApplicationCommon *app);
// virtual methods
bool onInput(const InputEvent &inputEvent) override;
void onBeforeShow(ShowMode mode, SwitchData *data) override;
bool onDatabaseMessage(sys::Message *msgl) override;
void rebuild() override;
void buildInterface() override;
};
} /* namespace gui */
| 432 |
14,668 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test_print_preview_dialog_cloned_observer.h"
#include "chrome/browser/printing/test_print_view_manager_for_request_preview.h"
#include "content/public/browser/web_contents_observer.h"
using content::WebContents;
namespace printing {
TestPrintPreviewDialogClonedObserver::TestPrintPreviewDialogClonedObserver(
content::WebContents* dialog)
: content::WebContentsObserver(dialog) {}
TestPrintPreviewDialogClonedObserver::~TestPrintPreviewDialogClonedObserver() =
default;
void TestPrintPreviewDialogClonedObserver::DidCloneToNewWebContents(
WebContents* old_web_contents,
WebContents* new_web_contents) {
TestPrintViewManagerForRequestPreview::CreateForWebContents(new_web_contents);
}
} // namespace printing
| 282 |
622 | <filename>nifty-client/src/main/java/com/facebook/nifty/client/HttpClientChannel.java<gh_stars>100-1000
/*
* Copyright (C) 2012-2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.nifty.client;
import com.facebook.nifty.duplex.TDuplexProtocolFactory;
import com.google.common.net.HttpHeaders;
import org.apache.thrift.transport.TTransportException;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.handler.codec.http.DefaultHttpRequest;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;
import org.jboss.netty.util.Timer;
import javax.annotation.concurrent.NotThreadSafe;
import java.util.Map;
@NotThreadSafe
public class HttpClientChannel extends AbstractClientChannel {
private final Channel underlyingNettyChannel;
private final String hostName;
private Map<String, String> headerDictionary;
private final String endpointUri;
protected HttpClientChannel(Channel channel,
Timer timer,
TDuplexProtocolFactory protocolFactory,
String hostName,
String endpointUri) {
super(channel, timer, protocolFactory);
this.underlyingNettyChannel = channel;
this.hostName = hostName;
this.endpointUri = endpointUri;
}
@Override
public Channel getNettyChannel() {
return underlyingNettyChannel;
}
@Override
protected ChannelBuffer extractResponse(Object message) throws TTransportException
{
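// Returning null presumably signals to AbstractClientChannel that no complete Thrift
// payload could be extracted from this message (wrong message type or empty body).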
if (!(message instanceof HttpResponse)) {
return null;
}
HttpResponse httpResponse = (HttpResponse) message;
if (!httpResponse.getStatus().equals(HttpResponseStatus.OK)) {
throw new TTransportException("HTTP response had non-OK status: " + httpResponse
.getStatus().toString());
}
ChannelBuffer content = httpResponse.getContent();
if (!content.readable()) {
return null;
}
return content;
}
@Override
protected ChannelFuture writeRequest(ChannelBuffer request)
{
HttpRequest httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST,
endpointUri);
httpRequest.headers().add(HttpHeaders.HOST, hostName);
httpRequest.headers().add(HttpHeaders.CONTENT_LENGTH, request.readableBytes());
httpRequest.headers().add(HttpHeaders.CONTENT_TYPE, "application/x-thrift");
httpRequest.headers().add(HttpHeaders.ACCEPT, "application/x-thrift");
httpRequest.headers().add(HttpHeaders.USER_AGENT, "Java/Swift-HttpThriftClientChannel");
if (headerDictionary != null) {
for (Map.Entry<String, String> entry : headerDictionary.entrySet()) {
httpRequest.headers().add(entry.getKey(), entry.getValue());
}
}
httpRequest.setContent(request);
return underlyingNettyChannel.write(httpRequest);
}
public void setHeaders(Map<String, String> headers)
{
this.headerDictionary = headers;
}
}
| 1,523 |
892 | <filename>advisories/unreviewed/2022/05/GHSA-449q-v4j2-5h8p/GHSA-449q-v4j2-5h8p.json
{
"schema_version": "1.2.0",
"id": "GHSA-449q-v4j2-5h8p",
"modified": "2022-05-13T01:30:06Z",
"published": "2022-05-13T01:30:06Z",
"aliases": [
"CVE-2015-5320"
],
"details": "Jenkins before 1.638 and LTS before 1.625.2 do not properly verify the shared secret used in JNLP slave connections, which allows remote attackers to connect as slaves and obtain sensitive information or possibly gain administrative access by leveraging knowledge of the name of a slave.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2015-5320"
},
{
"type": "WEB",
"url": "https://access.redhat.com/errata/RHSA-2016:0070"
},
{
"type": "WEB",
"url": "https://wiki.jenkins-ci.org/display/SECURITY/Jenkins+Security+Advisory+2015-11-11"
},
{
"type": "WEB",
"url": "http://rhn.redhat.com/errata/RHSA-2016-0489.html"
}
],
"database_specific": {
"cwe_ids": [
"CWE-200"
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 536 |
1,080 | #include "ping/parse.h"
#include "ping/compose.h"
| 21 |
1,958 | <filename>src/main/java/com/freetymekiyan/algorithms/other/StringDecompression.java<gh_stars>1000+
package com.freetymekiyan.algorithms.other;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.util.Stack;
/**
* input : abbaba4x[a]bb3x[abaa2x[bab]]
* <p>
* output : abbabaaaaabbabaababbababaababbababaababbab
* <p>
* Created by kiyan on 6/3/16.
*/
public class StringDecompression {
private StringDecompression sd;
/**
* Decompresses a string containing groups of the form Nx[pattern], where a pattern may
* itself contain nested Nx[...] groups; e.g. "2x[ab]" expands to "abab".
*
* @param s the compressed input string
* @return the fully expanded string
*/
public String decompress(String s) {
StringBuilder res = new StringBuilder();
StringBuilder nested = new StringBuilder();
Stack<String> stack = new Stack<>();
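// "stack" holds the still-open "Nxletters" prefixes; "nested" accumulates the expansion of
// inner groups until the enclosing group is closed, and is flushed to "res" once the stack empties.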
int i = 0;
while (i < s.length()) {
char c = s.charAt(i);
if (Character.isDigit(c)) {
// Save pattern in a stack
String pattern = s.substring(i, i + 2); // Assume that only 1 digit before x
int j = i + 3;
while (j < s.length() && Character.isLetter(s.charAt(j))) {
pattern += s.charAt(j);
j++;
}
i = j - 1;
stack.push(pattern);
} else if (c == ']') {
// Pop pattern from stack
String[] pop = stack.pop().split("x");
String pattern = "";
for (int j = 0; j < Integer.parseInt(pop[0]); j++) {
pattern += pop[1] + nested.toString();
}
nested.setLength(0); // Clear string builder
nested.append(pattern);
if (stack.empty()) {
res.append(nested.toString());
nested.setLength(0);
}
} else {
res.append(c);
}
i++;
}
return res.toString();
}
@Before
public void setUp() {
sd = new StringDecompression();
}
@Test
public void testExamples() {
String res = sd.decompress("abbaba4x[a]bb3x[abaa2x[bab]]");
Assert.assertEquals("abbabaaaaabbabaababbababaababbababaababbab", res);
}
@After
public void tearDown() {
sd = null;
}
}
| 1,164 |
311 | <gh_stars>100-1000
package io.github.zeleven.mua;
public class FileEntity {
private String name;
private long lastModified;
private String absolutePath;
public FileEntity() {
}
public FileEntity(String name, long lastModified, String absolutePath) {
this.name = name;
this.lastModified = lastModified;
this.absolutePath = absolutePath;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public long getLastModified() {
return lastModified;
}
public void setLastModified(long lastModified) {
this.lastModified = lastModified;
}
public String getAbsolutePath() {
return absolutePath;
}
public void setAbsolutePath(String absolutePath) {
this.absolutePath = absolutePath;
}
}
| 388 |
2,151 | <filename>chrome/browser/ui/ash/accessibility/accessibility_controller_client_unittest.cc<gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/ash/accessibility/accessibility_controller_client.h"
#include "ash/public/interfaces/accessibility_controller.mojom.h"
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
#include "base/time/time.h"
#include "chromeos/audio/chromeos_sounds.h"
#include "mojo/public/cpp/bindings/binding.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/accessibility/ax_enums.mojom.h"
namespace {
constexpr base::TimeDelta kShutdownSoundDuration =
base::TimeDelta::FromMilliseconds(1000);
class TestAccessibilityController : ash::mojom::AccessibilityController {
public:
TestAccessibilityController() : binding_(this) {}
~TestAccessibilityController() override = default;
ash::mojom::AccessibilityControllerPtr CreateInterfacePtr() {
ash::mojom::AccessibilityControllerPtr ptr;
binding_.Bind(mojo::MakeRequest(&ptr));
return ptr;
}
// ash::mojom::AccessibilityController:
void SetClient(ash::mojom::AccessibilityControllerClientPtr client) override {
was_client_set_ = true;
}
void SetDarkenScreen(bool darken) override {}
void BrailleDisplayStateChanged(bool connected) override {}
void SetFocusHighlightRect(const gfx::Rect& bounds_in_screen) override {}
void SetAccessibilityPanelFullscreen(bool fullscreen) override {}
void SetSelectToSpeakState(ash::mojom::SelectToSpeakState state) override {}
bool was_client_set() const { return was_client_set_; }
private:
mojo::Binding<ash::mojom::AccessibilityController> binding_;
bool was_client_set_ = false;
DISALLOW_COPY_AND_ASSIGN(TestAccessibilityController);
};
class FakeAccessibilityControllerClient : public AccessibilityControllerClient {
public:
FakeAccessibilityControllerClient() = default;
~FakeAccessibilityControllerClient() override = default;
// AccessibilityControllerClient:
void TriggerAccessibilityAlert(
ash::mojom::AccessibilityAlert alert) override {
last_a11y_alert_ = alert;
}
void PlayEarcon(int32_t sound_key) override { last_sound_key_ = sound_key; }
void PlayShutdownSound(PlayShutdownSoundCallback callback) override {
std::move(callback).Run(kShutdownSoundDuration);
}
void HandleAccessibilityGesture(ax::mojom::Gesture gesture) override {
last_a11y_gesture_ = gesture;
}
void ToggleDictation(ToggleDictationCallback callback) override {
++toggle_dictation_count_;
dictation_on_ = !dictation_on_;
std::move(callback).Run(dictation_on_);
}
void SilenceSpokenFeedback() override { ++silence_spoken_feedback_count_; }
void OnTwoFingerTouchStart() override { ++on_two_finger_touch_start_count_; }
void OnTwoFingerTouchStop() override { ++on_two_finger_touch_stop_count_; }
void ShouldToggleSpokenFeedbackViaTouch(
ShouldToggleSpokenFeedbackViaTouchCallback callback) override {
std::move(callback).Run(true);
}
void PlaySpokenFeedbackToggleCountdown(int tick_count) override {
spoken_feedback_toggle_count_down_ = tick_count;
}
void RequestSelectToSpeakStateChange() override {
++select_to_speak_state_changes_;
}
ash::mojom::AccessibilityAlert last_a11y_alert_ =
ash::mojom::AccessibilityAlert::NONE;
int32_t last_sound_key_ = -1;
ax::mojom::Gesture last_a11y_gesture_ = ax::mojom::Gesture::kNone;
int toggle_dictation_count_ = 0;
int silence_spoken_feedback_count_ = 0;
int on_two_finger_touch_start_count_ = 0;
int on_two_finger_touch_stop_count_ = 0;
int spoken_feedback_toggle_count_down_ = -1;
int select_to_speak_state_changes_ = 0;
private:
DISALLOW_COPY_AND_ASSIGN(FakeAccessibilityControllerClient);
bool dictation_on_ = false;
};
} // namespace
class AccessibilityControllerClientTest : public testing::Test {
public:
AccessibilityControllerClientTest() = default;
~AccessibilityControllerClientTest() override = default;
private:
base::test::ScopedTaskEnvironment scoped_task_enviroment_;
DISALLOW_COPY_AND_ASSIGN(AccessibilityControllerClientTest);
};
TEST_F(AccessibilityControllerClientTest, MethodCalls) {
FakeAccessibilityControllerClient client;
TestAccessibilityController controller;
client.InitForTesting(controller.CreateInterfacePtr());
client.FlushForTesting();
// Tests client is set.
EXPECT_TRUE(controller.was_client_set());
// Tests TriggerAccessibilityAlert method call.
const ash::mojom::AccessibilityAlert alert =
ash::mojom::AccessibilityAlert::SCREEN_ON;
client.TriggerAccessibilityAlert(alert);
EXPECT_EQ(alert, client.last_a11y_alert_);
// Tests PlayEarcon method call.
const int32_t sound_key = chromeos::SOUND_SHUTDOWN;
client.PlayEarcon(sound_key);
EXPECT_EQ(sound_key, client.last_sound_key_);
// Tests PlayShutdownSound method call.
base::TimeDelta sound_duration;
client.PlayShutdownSound(base::BindOnce(
[](base::TimeDelta* dst, base::TimeDelta duration) { *dst = duration; },
base::Unretained(&sound_duration)));
base::RunLoop().RunUntilIdle();
EXPECT_EQ(kShutdownSoundDuration, sound_duration);
// Tests HandleAccessibilityGesture method call.
ax::mojom::Gesture gesture = ax::mojom::Gesture::kClick;
client.HandleAccessibilityGesture(gesture);
EXPECT_EQ(gesture, client.last_a11y_gesture_);
// Tests ToggleDictation method call.
EXPECT_EQ(0, client.toggle_dictation_count_);
client.ToggleDictation(base::BindOnce([](bool b) {}));
EXPECT_EQ(1, client.toggle_dictation_count_);
EXPECT_EQ(0, client.silence_spoken_feedback_count_);
client.SilenceSpokenFeedback();
EXPECT_EQ(1, client.silence_spoken_feedback_count_);
// Tests OnTwoFingerTouchStart method call.
EXPECT_EQ(0, client.on_two_finger_touch_start_count_);
client.OnTwoFingerTouchStart();
EXPECT_EQ(1, client.on_two_finger_touch_start_count_);
// Tests OnTwoFingerTouchStop method call.
EXPECT_EQ(0, client.on_two_finger_touch_stop_count_);
client.OnTwoFingerTouchStop();
EXPECT_EQ(1, client.on_two_finger_touch_stop_count_);
// Tests ShouldToggleSpokenFeedbackViaTouch method call.
bool should_toggle = false;
client.ShouldToggleSpokenFeedbackViaTouch(base::BindOnce(
[](bool* dst, bool should_toggle) { *dst = should_toggle; },
base::Unretained(&should_toggle)));
base::RunLoop().RunUntilIdle();
EXPECT_TRUE(should_toggle);
// Tests PlaySpokenFeedbackToggleCountdown method call.
const int tick_count = 2;
client.PlaySpokenFeedbackToggleCountdown(tick_count);
EXPECT_EQ(tick_count, client.spoken_feedback_toggle_count_down_);
// Tests RequestSelectToSpeakStateChange method call.
client.RequestSelectToSpeakStateChange();
EXPECT_EQ(1, client.select_to_speak_state_changes_);
}
| 2,325 |
1,273 | package org.broadinstitute.hellbender.utils.reference;
import htsjdk.samtools.SAMSequenceDictionary;
import htsjdk.samtools.SAMSequenceRecord;
import org.broadinstitute.hellbender.utils.SimpleInterval;
import java.util.List;
import java.util.function.IntFunction;
import java.util.function.ToIntFunction;
/**
* Class to translate back and forth from absolute {@code long-typed} base positions to relative ones (the usual contig, position pairs).
* <p>
* If we were to concatenate the sequences of every chromosome in the order these appear in the reference dictionary,
* each base in the reference would have an absolute position or offset from the beginning of this imaginary super-contig.
* </p>
* <p>
* Sometimes it might be handy to use this type of address/coordinate rather than the usual contig (name or index) and position
* within the contig.
* </p>
* <p>
* This class implements such transformations to and from absolute coordinates efficiently.
* </p>
* <p>
* Relative to absolute is rather a trivial matter if you keep track of the accumulative number of bases on the reference
* right before the start of each contig.
* </p>
* <p> Absolute to relative is a bit trickier. An obvious solution would consist of a binary search amongst the contigs for the one whose
* absolute base range encloses the requested position. The cost of this lookup would be O(log C) where C is the number of
* contigs in the reference. In some cases like hg38 there are over 3000 such contigs due to the alt-contigs and decoys, resulting in around 11 iterations
* to find the target's containing contig.</p>
* <p>
* This class has a couple of accelerations:
* <p>
* First we check whether the target contig is the last one returned, which should quite often be the case when accessing the reference in
* sequence.
* </p>
* <p>
* Then we keep a "percentile" table that contains the contig index that would include that percentile's absolute position. The granularity
* of such a table is linked to the number of contigs in the reference, with an O(C) additional memory cost.
* </p>
* <p>
* These two accelerations may effectively reduce the look-up cost to O(1) in most scenarios. The drawback is more complicated code and the
* need to deal with floating-point arithmetic for the percentile look-up.
* </p>
* </p>
*/
public final class AbsoluteCoordinates {
/**
* Length of each contig.
*/
private final int[] lengths;
/**
* Percentile look up table.
*/
private final int[] percentiles;
/**
* Contains the number of bases before the contig with the ith index (0-based).
*/
private final long[] accumulative;
/**
* Total length of the reference.
*/
private final long total;
/**
* Factor to multiply to an absolute position to get its corresponding percentile.
*/
private final float percentileFactor;
/**
* Function that resolves the contig index given its name.
*/
private final ToIntFunction<String> contigToIndex;
/**
* Function that resolves the contig name given its index.
*/
private final IntFunction<String> indexToContig;
private int lastCtg;
private AbsoluteCoordinates(final int[] lengths, final long[] accumulative,
final ToIntFunction<String> contigToIndex, final IntFunction<String> indexToContig) {
this.lengths = lengths;
this.accumulative = accumulative;
this.contigToIndex = contigToIndex;
this.indexToContig = indexToContig;
this.total = accumulative[accumulative.length - 1];
this.percentiles = calculatePercentiles(lengths, accumulative, total);
this.percentileFactor = (percentiles.length - 1) / (float) this.total;
this.lastCtg = 0;
}
private static int[] calculatePercentiles(final int[] lengths, final long[] accumulative, final long total) {
final int[] result = new int[(accumulative.length << 1) + 1];
final float fraction = total / (float)(result.length - 1);
double fractionAccumulator = 0;
for (int i = 0, j = 0; i < lengths.length; i++) {
final long accumulativePlusLength = accumulative[i + 1]; // == accumulative[i] + lengths[i];
while (fractionAccumulator < accumulativePlusLength && j < result.length - 1) {
result[j++] = i;
fractionAccumulator += fraction;
}
}
result[result.length - 1] = lengths.length - 1;
return result;
}
public static AbsoluteCoordinates of(final SAMSequenceDictionary dictionary) {
final ToIntFunction<String> contigToIndex = dictionary::getSequenceIndex;
final IntFunction<String> indexToContig = i -> dictionary.getSequence(i).getContig();
final List<SAMSequenceRecord> sequences = dictionary.getSequences();
final int numberOfSequences = sequences.size();
final int[] lengths = new int[numberOfSequences];
final long[] accumulative = new long[numberOfSequences + 1];
for (int i = 0; i < numberOfSequences; i++) {
final SAMSequenceRecord sequence = sequences.get(i);
lengths[i] = sequence.getSequenceLength();
}
long leftSum = lengths[0];
for (int i = 1; i < numberOfSequences; i++) {
accumulative[i] = leftSum;
leftSum += lengths[i];
}
accumulative[numberOfSequences] = leftSum;
return new AbsoluteCoordinates(lengths, accumulative, contigToIndex, indexToContig);
}
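// Illustrative usage sketch (hypothetical dictionary with two contigs, "chr1" of length 1000 and "chr2" of length 500;
// not part of the original documentation):
//   final AbsoluteCoordinates coords = AbsoluteCoordinates.of(dictionary);
//   final long abs = coords.toAbsolute("chr2", 10);              // == 1000 + 10, bases before chr2 plus the position
//   final SimpleInterval back = coords.toSimpleInterval(abs, 1); // recovers ("chr2", 10, 10)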
public long toAbsolute(final String ctgName, final int position) {
return toAbsolute(contigToIndex.applyAsInt(ctgName), position);
}
/**
* Obtains the absolute coordinate for the start position in an interval.
* @param simpleInterval the target position in relative coordinates
* @return 1 or greater.
* @throws IllegalArgumentException if there is no such contig name or the contig is too small.
*/
public long toAbsolute(final SimpleInterval simpleInterval) {
return toAbsolute(simpleInterval.getContig(), simpleInterval.getStart());
}
public long toAbsolute(final int ctgIdx, final int position) {
if (lengths[ctgIdx] < position) {
throw new IllegalArgumentException("position outside containg contig");
}
lastCtg = ctgIdx;
return accumulative[ctgIdx] + position;
}
public static class Relative {
public final String contig;
public final int contigIndex;
public final int position;
Relative(final String contig, final int contigIndex, final int position) {
this.contig = contig;
this.contigIndex = contigIndex;
this.position = position;
}
}
@FunctionalInterface
interface RelativeFactory<E> {
E create(final String ctgName, final int ctgIdx, final int position);
}
public SimpleInterval toSimpleInterval(final long absoluteStart, final int length) {
return toRelative(absoluteStart, (n, i, p) -> new SimpleInterval(n, p, p + length - 1));
}
public Relative toRelative(final long absolute) {
return toRelative(absolute, Relative::new);
}
public <E> E toRelative(final long absolute, final RelativeFactory<E> factory) {
if (absolute < 1) {
throw new IllegalArgumentException("absolute cannot be less than 1");
} else if (absolute > total) {
throw new IllegalArgumentException("absolute is too large");
} else if (accumulative[lastCtg] < absolute) {
if (absolute <= accumulative[lastCtg + 1]) {
return factory.create(indexToContig.apply(lastCtg), lastCtg, (int) (absolute - accumulative[lastCtg]));
} else {
return searchRelative(absolute, factory);
}
} else {
return searchRelative(absolute, factory);
}
}
private <E> E searchRelative(final long target, final RelativeFactory<E> factory) {
final int percentileIndex = (int) (target * percentileFactor);
if (percentileIndex >= percentiles.length) {
throw new IllegalArgumentException("xx " + target + " " + total );
}
final int percentileCtg = percentiles[percentileIndex];
if (percentileIndex < percentiles.length - 1) {
return searchRelative(target, percentileCtg, percentiles[percentileIndex + 1], factory);
} else {
return searchRelative(target, percentileCtg, lengths.length - 1, factory);
}
}
private <E> E searchRelative(final long target, final int minCtgIdx, final int maxCtgIdx, final RelativeFactory<E> factory) {
int i = minCtgIdx, j = maxCtgIdx;
while (i < j) {
final int mid = (i + j) >> 1;
if (accumulative[mid] >= target) {
j = mid - 1;
} else if (accumulative[mid + 1] < target) {
i = mid + 1;
} else {
i = mid;
break;
}
}
lastCtg = i;
return factory.create(indexToContig.apply(i), i, (int) (target - accumulative[i]));
}
}
| 3,472 |
5,964 | <reponame>wenfeifei/miniblink49<filename>third_party/WebKit/Source/core/events/PointerIdManager.h
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef PointerIdManager_h
#define PointerIdManager_h
#include "wtf/ListHashSet.h"
namespace blink {
/**
Helper class for tracking the primary pointer id for each type of PointerEvents.
*/
class PointerIdManager {
public:
// TODO(mustaq): Move this enum to PointerEvent.h? Change the spec to use enums?
enum PointerType {
PointerTypeUnknown = 0,
PointerTypeMouse,
PointerTypePen,
PointerTypeTouch,
PointerTypeLastEntry // Must be the last entry in the list
};
PointerIdManager();
~PointerIdManager();
void clear();
void add(PointerType, unsigned);
void remove(PointerType, unsigned);
bool isPrimary(PointerType, unsigned);
private:
ListHashSet<unsigned> m_ids[PointerTypeLastEntry];
bool m_hasPrimaryId[PointerTypeLastEntry];
};
} // namespace blink
#endif // PointerIdManager_h
| 398 |
428 | <reponame>SmileyAG/bspsrc
/*
** 2013 July 3
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
*/
package info.ata4.bspunprotect;
import info.ata4.bsplib.BspFile;
import info.ata4.bsplib.lump.Lump;
import info.ata4.bsplib.lump.LumpFile;
import info.ata4.bsplib.lump.LumpType;
import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
import org.apache.commons.compress.archivers.zip.ZipFile;
import org.apache.commons.io.IOUtils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
/**
* BSPProtect map decrypter.
*
* @author <NAME> <barracuda415 at yahoo.de>
*/
public class BspUnprotect {
public static final String VERSION = "1.0";
public static final String BSPPROTECT_FILE = "entities.dat";
public static final String BSPPROTECT_KEY = "<KEY>";
/**
* @param args the command line arguments
*/
public static void main(String[] args) {
if (args.length == 0) {
System.out.println("BspUnprotect " + VERSION);
System.out.println("Usage: bspunprotect.jar <BSP file> [key]");
System.exit(0);
}
Path file = Paths.get(args[0]);
byte[] key = args.length >= 2 ? args[1].getBytes() : BSPPROTECT_KEY.getBytes();
try {
BspUnprotect unprot = new BspUnprotect();
unprot.setKey(key);
unprot.decrypt(file);
} catch (Exception ex) {
System.err.println(ex.getMessage());
}
}
private BspFile bspFile;
private byte[] key;
public void setKey(byte[] key) {
if (key.length % 8 != 0) {
throw new IllegalArgumentException("Invalid key length, must be multiple of 8");
}
this.key = key;
}
public byte[] getKey() {
return key;
}
public void decrypt(Path file) {
System.out.println("Loading BSP file " + file.getFileName());
try {
bspFile = new BspFile();
bspFile.load(file);
} catch (IOException ex) {
throw new RuntimeException("Couldn't load BSP file", ex);
}
System.out.println("Reading pakfile lump");
byte[] encEnt = readEncryptedEntities();
if (encEnt == null) {
throw new RuntimeException("This map wasn't protected by BSPProtect");
}
System.out.println("Restoring entities");
Lump entLump = bspFile.getLump(LumpType.LUMP_ENTITIES);
int capacity = encEnt.length;
if (entLump.getLength() > 0) {
capacity += entLump.getLength();
}
ByteBuffer entBuf = ByteBuffer.allocateDirect(capacity);
entBuf.order(bspFile.getByteOrder());
// copy the worldspawn into the new entity lump
if (entLump.getLength() > 0) {
ByteBuffer entBufOld = entLump.getBuffer();
entBufOld.rewind();
entBufOld.limit(entBufOld.limit() - 1); // decrease limit to skip NUL
entBuf.put(entBufOld);
}
// write decypted entity data into the new buffer
try {
InputStream is = new ByteArrayInputStream(encEnt);
// init ICE cipher
IceKey ice = new IceKey(key.length / 8 - 1);
ice.set(key);
final int blockSize = ice.blockSize();
byte[] cipher = new byte[blockSize];
byte[] plain = new byte[blockSize];
for (int read = 0; read != -1; read = is.read(cipher)) {
// decrypt block
ice.decrypt(cipher, plain);
// the last block is not encrypted if not equal to block size
entBuf.put(read == blockSize ? plain : cipher, 0, read);
}
// NUL terminator
entBuf.put((byte) 0);
} catch (IOException ex) {
throw new RuntimeException("Couldn't decrypt entity data", ex);
}
System.out.println("Writing lump file");
// write lump file
try {
Lump entLumpNew = new Lump(LumpType.LUMP_ENTITIES);
entLumpNew.setBuffer(entBuf);
LumpFile lump = new LumpFile(bspFile);
lump.setLump(entLumpNew);
lump.save(bspFile.getNextLumpFile());
} catch (IOException ex) {
throw new RuntimeException("Couldn't write decrypted entity lump file", ex);
}
}
private byte[] readEncryptedEntities() {
try (ZipFile zip = bspFile.getPakFile().getZipFile()) {
Iterator<ZipArchiveEntry> iterator = zip.getEntries(BSPPROTECT_FILE).iterator();
if (iterator.hasNext()) {
return IOUtils.toByteArray(zip.getInputStream(iterator.next()));
}
} catch (IOException ex) {
throw new RuntimeException("Couldn't read pakfile", ex);
}
return null;
}
}
| 2,493 |
375 | <gh_stars>100-1000
package io.lumify.dbpedia.mapreduce;
import io.lumify.core.config.Configuration;
import io.lumify.core.config.HashMapConfigurationLoader;
import io.lumify.core.exception.LumifyException;
import io.lumify.core.mapreduce.LumifyElementMapperBase;
import io.lumify.core.model.properties.LumifyProperties;
import io.lumify.core.model.user.AuthorizationRepository;
import io.lumify.core.model.user.InMemoryAuthorizationRepository;
import io.lumify.core.security.DirectVisibilityTranslator;
import io.lumify.core.security.VisibilityTranslator;
import io.lumify.dbpedia.mapreduce.model.LineData;
import io.lumify.dbpedia.mapreduce.model.LinkValue;
import io.lumify.wikipedia.WikipediaConstants;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.securegraph.Metadata;
import org.securegraph.Vertex;
import org.securegraph.VertexBuilder;
import org.securegraph.Visibility;
import org.securegraph.accumulo.AccumuloAuthorizations;
import org.securegraph.accumulo.mapreduce.SecureGraphMRUtils;
import java.io.IOException;
import java.util.Map;
public class ImportMRMapper extends LumifyElementMapperBase<LongWritable, Text> {
private static final String DBPEDIA_ID_PREFIX = "DBPEDIA_";
private Counter linesProcessedCounter;
private VisibilityTranslator visibilityTranslator;
private Visibility visibility;
private Visibility defaultVisibility;
private AccumuloAuthorizations authorizations;
public static String getDbpediaEntityVertexId(String pageTitle) {
return DBPEDIA_ID_PREFIX + pageTitle.trim().toLowerCase();
}
private String getEntityHasWikipediaPageEdgeId(Vertex entityVertex, Vertex pageVertex) {
return DBPEDIA_ID_PREFIX + entityVertex.getId() + "_HAS_PAGE_" + pageVertex.getId();
}
@Override
protected void setup(Context context) throws IOException, InterruptedException {
super.setup(context);
this.visibilityTranslator = new DirectVisibilityTranslator();
this.visibility = this.visibilityTranslator.getDefaultVisibility();
this.defaultVisibility = this.visibilityTranslator.getDefaultVisibility();
this.authorizations = new AccumuloAuthorizations();
AuthorizationRepository authorizationRepository = new InMemoryAuthorizationRepository();
try {
Map configurationMap = SecureGraphMRUtils.toMap(context.getConfiguration());
Configuration config = HashMapConfigurationLoader.load(configurationMap);
} catch (Exception e) {
throw new IOException("Could not configure secure graph ontology repository", e);
}
linesProcessedCounter = context.getCounter(DbpediaImportCounters.LINES_PROCESSED);
}
@Override
protected void safeMap(LongWritable key, Text line, Context context) throws Exception {
String lineString = line.toString().trim();
try {
if (lineString.length() == 0) {
return;
}
if (lineString.startsWith("#")) {
return;
}
LineData lineData = LineData.parse(lineString);
Vertex dbpediaEntityVertex = createDbpediaEntityVertex(lineData);
if (lineData.getValue() instanceof LinkValue) {
LinkValue linkValue = (LinkValue) lineData.getValue();
if (!lineData.getPropertyIri().equals("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")) {
createLinkToDbpediaEntity(lineData, dbpediaEntityVertex, linkValue);
}
}
linesProcessedCounter.increment(1);
} catch (Throwable ex) {
throw new LumifyException("Could not process line: " + lineString, ex);
}
}
private void createLinkToDbpediaEntity(LineData lineData, Vertex pageVertex, LinkValue linkValue) {
String linkedPageVertexId = WikipediaConstants.getWikipediaPageVertexId(linkValue.getPageTitle());
VertexBuilder linkedPageVertexBuilder = prepareVertex(linkedPageVertexId, visibility);
LumifyProperties.CONCEPT_TYPE.setProperty(linkedPageVertexBuilder, WikipediaConstants.WIKIPEDIA_PAGE_CONCEPT_URI, visibility);
Metadata linkedTitleMetadata = new Metadata();
LumifyProperties.CONFIDENCE.setMetadata(linkedTitleMetadata, 0.1, defaultVisibility);
LumifyProperties.TITLE.addPropertyValue(linkedPageVertexBuilder, ImportMR.MULTI_VALUE_KEY, linkValue.getPageTitle(), linkedTitleMetadata, visibility);
Vertex linkedPageVertex = linkedPageVertexBuilder.save(authorizations);
String label = lineData.getPropertyIri();
String edgeId = pageVertex.getId() + "_" + label + "_" + linkedPageVertex.getId();
addEdge(edgeId, pageVertex, linkedPageVertex, label, visibility, authorizations);
}
private Vertex createDbpediaEntityVertex(LineData lineData) {
Vertex pageVertex = createPageVertex(lineData);
String dbpediaEntityVertexId = getDbpediaEntityVertexId(lineData.getPageTitle());
VertexBuilder entityVertexBuilder = prepareVertex(dbpediaEntityVertexId, visibility);
Metadata conceptTypeMetadata = new Metadata();
LumifyProperties.CONFIDENCE.setMetadata(conceptTypeMetadata, 0.1, defaultVisibility);
LumifyProperties.CONCEPT_TYPE.addPropertyValue(entityVertexBuilder, ImportMR.MULTI_VALUE_KEY, "http://www.w3.org/2002/07/owl#Thing", conceptTypeMetadata, visibility);
Metadata titleMetadata = new Metadata();
LumifyProperties.CONFIDENCE.setMetadata(titleMetadata, 0.1, defaultVisibility);
LumifyProperties.TITLE.addPropertyValue(entityVertexBuilder, ImportMR.MULTI_VALUE_KEY, lineData.getPageTitle(), titleMetadata, visibility);
if (lineData.getPropertyIri().equals("http://www.w3.org/1999/02/22-rdf-syntax-ns#type") && lineData.getValue() instanceof LinkValue) {
LinkValue linkValue = (LinkValue) lineData.getValue();
}
if (!(lineData.getValue() instanceof LinkValue)) {
String multiValueKey = lineData.getValue().getValueString();
entityVertexBuilder.addPropertyValue(multiValueKey, lineData.getPropertyIri(), lineData.getValue().getValue(), visibility);
}
Vertex entityVertex = entityVertexBuilder.save(authorizations);
String edgeId = getEntityHasWikipediaPageEdgeId(entityVertex, pageVertex);
addEdge(edgeId, entityVertex, pageVertex, DbpediaOntology.EDGE_LABEL_ENTITY_HAS_WIKIPEDIA_PAGE, visibility, authorizations);
return entityVertex;
}
private Vertex createPageVertex(LineData lineData) {
String wikipediaPageVertexId = WikipediaConstants.getWikipediaPageVertexId(lineData.getPageTitle());
VertexBuilder pageVertexBuilder = prepareVertex(wikipediaPageVertexId, visibility);
LumifyProperties.CONCEPT_TYPE.setProperty(pageVertexBuilder, WikipediaConstants.WIKIPEDIA_PAGE_CONCEPT_URI, visibility);
Metadata titleMetadata = new Metadata();
LumifyProperties.CONFIDENCE.setMetadata(titleMetadata, 0.1, defaultVisibility);
LumifyProperties.TITLE.addPropertyValue(pageVertexBuilder, ImportMR.MULTI_VALUE_KEY, lineData.getPageTitle(), titleMetadata, visibility);
return pageVertexBuilder.save(authorizations);
}
}
| 2,676 |
329 | <filename>java/src/main/java/com/cloudera/api/model/ApiHdfsCloudReplicationArguments.java<gh_stars>100-1000
// Copyright (c) 2016 Cloudera, Inc. All rights reserved.
package com.cloudera.api.model;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import com.cloudera.api.ApiUtils;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import org.apache.cxf.common.util.StringUtils;
@XmlRootElement(name = "hdfsCloudReplicationArguments")
public class ApiHdfsCloudReplicationArguments extends ApiHdfsReplicationArguments {
/**
* Source Account during replication. If this is non-null,
* destinationAccount should be null
*/
private String sourceAccount;
/**
* destination Account during replication. If this is non-null,
* sourceAccount should be null
*/
private String destinationAccount;
// For JAX-B
public ApiHdfsCloudReplicationArguments() {
}
public ApiHdfsCloudReplicationArguments(ApiServiceRef sourceService,
String sourcePath, String destinationPath,
String mapreduceServiceName, Integer numMaps, String userName,
String sourceAccount, String destinationAccount) {
super(sourceService, sourcePath, destinationPath, mapreduceServiceName,
numMaps, userName);
this.sourceAccount = sourceAccount;
this.destinationAccount = destinationAccount;
}
@XmlElement
public String getSourceAccount() {
return sourceAccount;
}
public void setSourceAccount(String sourceAccount) {
this.sourceAccount = sourceAccount;
}
@XmlElement
public String getDestinationAccount() {
return destinationAccount;
}
public void setDestinationAccount(String destinationAccount) {
this.destinationAccount = destinationAccount;
}
@Override
public String toString() {
if (!StringUtils.isEmpty(sourceAccount)) {
return super.toStringHelper()
.add("sourceAccount", sourceAccount)
.toString();
} else if (!StringUtils.isEmpty(destinationAccount)) {
return super.toStringHelper()
.add("destinationAccount", destinationAccount)
.toString();
}
Preconditions.checkState(false, "Both sourceAccount and destinationAccount are null " +
"in ApiHdfsCloudReplicationArguments");
return null;
}
@Override
public boolean equals(Object o) {
ApiHdfsCloudReplicationArguments other = ApiUtils.baseEquals(this, o);
return this == other || (other != null &&
super.equals(other) &&
Objects.equal(sourceAccount, other.sourceAccount) &&
Objects.equal(destinationAccount, other.destinationAccount));
}
@Override
public int hashCode() {
return Objects.hashCode(super.hashCode(), sourceAccount, destinationAccount);
}
}
| 923 |
379 | <reponame>oimchat/oim-fx<filename>client/oim-client-ui-fx/src/main/java/com/oim/ui/fx/classics/find/FindGroupPane.java
package com.oim.ui.fx.classics.find;
import com.oim.fx.common.component.SearchBox;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import javafx.scene.control.Button;
import javafx.scene.layout.AnchorPane;
import javafx.scene.layout.Priority;
import javafx.scene.layout.VBox;
/**
* @author: XiaHui
* @date: 2017-03-27 16:30:11
*/
public class FindGroupPane extends VBox {
AnchorPane rootPane = new AnchorPane();
SearchBox searchBox = new SearchBox();
Button searchButton = new Button();
QueryPageListPane queryListPane = new QueryPageListPane();
public FindGroupPane() {
initComponent();
iniEvent();
}
private void initComponent() {
this.getChildren().add(rootPane);
this.getChildren().add(queryListPane);
VBox.setVgrow(queryListPane, Priority.ALWAYS);
rootPane.setMinHeight(60);
rootPane.setPrefHeight(60);
rootPane.setStyle("-fx-background-color:rgba(240, 240, 240, 0.9)");
searchBox.setPromptText("请输入 群号码/名称/关键字");
searchBox.setLayoutX(50);
searchBox.setLayoutY(15);
searchBox.setPrefSize(600, 25);
searchButton.setText("查找");
searchButton.setLayoutX(660);
searchButton.setLayoutY(15);
searchButton.setPrefSize(100, 26);
rootPane.getChildren().add(searchBox);
rootPane.getChildren().add(searchButton);
queryListPane.setVgap(10);
queryListPane.setHgap(10);
}
private void iniEvent() {
}
public void initData(){
searchBox.setText("");
}
public void setSearchAction(EventHandler<ActionEvent> value) {
searchButton.setOnAction(value);
}
public String getText() {
return searchBox.getText();
}
public QueryPageListPane getQueryListPane(){
return queryListPane;
}
}
| 735 |
739 | """
* Copyright 2009 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
@TagName(VideoElement.TAG)
class VideoElement(MediaElement):
TAG = "video"
def create(self):
return Document.get().createElement(TAG).cast()
def __init__(self):
pass
def getWidth(self):
JS("""
return this['width'];
""")
def setWidth(self, width):
JS("""
this['width'] = width;
""")
def getHeight(self):
JS("""
return this['height'];
""")
def setHeight(self, height):
JS("""
this['height'] = height;
""")
def getVideoWidth(self):
JS("""
return this['videoWidth'];
""")
def getVideoHeight(self):
JS("""
return this['videoHeight'];
""")
def getPoster(self):
JS("""
return this['poster'];
""")
def setPoster(self, url):
JS("""
this['poster'] = url;
""")
| 615 |
2,069 | <reponame>MatthewEskolin/Exceptionless
[{"Id":null,"OrganizationId":null,"ProjectId":null,"StackId":null,"IsFirstOccurrence":false,"IsFixed":false,"IsHidden":false,"CreatedUtc":"0001-01-01T00:00:00","Idx":{},"Type":"usage","Source":"app.Dashboard.Recent.events.nextPage","Date":"2020-01-01T00:00:00-06:00","Tags":["UI"],"Message":null,"Geo":null,"Value":null,"Data":{"@version":"2.2.588","@user":{"Identity":"4b7dbfd1-abb0-4389-b602-aa104464f07f","Name":"146","Data":{}},"@request":{"UserAgent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7","HttpMethod":null,"IsSecure":true,"Host":"172.16.17.32","Port":80,"Path":"/et/aliquyam","Referrer":null,"ClientIpAddress":null,"Cookies":{},"PostData":null,"QueryString":null,"Data":{}}},"ReferenceId":null,"SessionId":null}] | 311 |
45,293 | <filename>compiler/tests-spec/testData/diagnostics/linked/sectionsMap.json<gh_stars>1000+
{
"type-inference": [
"smart-casts/smart-cast-sink-stability",
"local-type-inference",
"smart-casts/smart-cast-types"
],
"overload-resolution": [
"building-the-overload-candidate-set-ocs/operator-call",
"building-the-overload-candidate-set-ocs/call-with-an-explicit-receiver",
"choosing-the-most-specific-candidate-from-the-overload-candidate-set/rationale-1",
"choosing-the-most-specific-candidate-from-the-overload-candidate-set/algorithm-of-msc-selection",
"callables-and-invoke-convention",
"building-the-overload-candidate-set-ocs/call-with-trailing-lambda-expressions",
"building-the-overload-candidate-set-ocs/call-with-named-parameters",
"building-the-overload-candidate-set-ocs/call-with-specified-type-parameters",
"building-the-overload-candidate-set-ocs/infix-function-call",
"determining-function-applicability-for-a-specific-call/description",
"building-the-overload-candidate-set-ocs/call-without-an-explicit-receiver",
"c-level-partition",
"receivers",
"building-the-overload-candidate-set-ocs/call-with-an-explicit-receiver/call-with-an-explicit-type-receiver",
"resolving-callable-references/bidirectional-resolution-for-callable-calls",
"resolving-callable-references/resolving-callable-references-not-used-as-arguments-to-a-call",
"resolving-callable-references",
"determining-function-applicability-for-a-specific-call/rationale"
],
"type-system": [
"type-kinds/built-in-types/kotlin.any",
"introduction-1",
"type-kinds/built-in-types/kotlin.nothing",
"subtyping/subtyping-rules",
"subtyping",
"subtyping/subtyping-for-intersection-types",
"type-contexts-and-scopes/inner-and-nested-type-contexts",
"subtyping/subtyping-for-nullable-types",
"type-kinds/type-parameters"
],
"declarations": [
"classifier-declaration/class-declaration/abstract-classes",
"classifier-declaration/class-declaration/constructor-declaration",
"classifier-declaration/class-declaration",
"classifier-declaration/data-class-declaration",
"classifier-declaration/class-declaration/nested-and-inner-classifiers",
"property-declaration/local-property-declaration",
"property-declaration/property-initialization",
"type-alias",
"classifier-declaration/classifier-initialization",
"function-declaration"
],
"inheritance": [
"overriding"
],
"statements": [
"assignments/operator-assignments",
"assignments",
"assignments/simple-assignments",
"loop-statements/while-loop-statement",
"loop-statements/do-while-loop-statement"
],
"expressions": [
"not-null-assertion-expression",
"comparison-expressions",
"when-expression",
"when-expression/exhaustive-when-expressions",
"constant-literals/real-literals",
"constant-literals/integer-literals/decimal-integer-literals",
"constant-literals/integer-literals/binary-integer-literals",
"constant-literals/integer-literals/hexadecimal-integer-literals",
"constant-literals/boolean-literals",
"constant-literals/character-literals",
"constant-literals/the-types-for-integer-literals",
"logical-disjunction-expression",
"logical-conjunction-expression",
"additive-expression",
"type-checking-and-containment-checking-expressions/type-checking-expression",
"type-checking-and-containment-checking-expressions/containment-checking-expression",
"try-expression",
"elvis-operator-expression",
"multiplicative-expression",
"range-expression",
"equality-expressions/value-equality-expressions",
"jump-expressions/break-expression",
"jump-expressions/return-expressions",
"jump-expressions/continue-expression",
"jump-expressions",
"conditional-expression",
"built-in-types-and-their-semantics/kotlin.nothing-1",
"built-in-types-and-their-semantics/kotlin.unit",
"prefix-expressions/unary-minus-expression",
"prefix-expressions/logical-not-expression",
"prefix-expressions/unary-plus-expression",
"prefix-expressions/prefix-increment-expression",
"prefix-expressions/prefix-decrement-expression",
"call-and-property-access-expressions/callable-references",
"call-and-property-access-expressions/navigation-operators",
"function-literals/lambda-literals"
],
"overloadable-operators": [
""
],
"built-in-types-and-their-semantics": [
"built-in-integer-types-1/integer-type-widening"
],
"control--and-data-flow-analysis": [
"control-flow-graph/expressions-1/conditional-expressions",
"performing-analysis-on-the-control-flow-graph/variable-initialization-analysis"
],
"annotations": [
"annotation-targets"
]
} | 1,810 |
1,374 | <gh_stars>1000+
package def.dom;
public class HTMLMenuElement extends HTMLElement {
public java.lang.Boolean compact;
public java.lang.String type;
public static HTMLMenuElement prototype;
public HTMLMenuElement(){}
}
| 74 |
1,457 | <gh_stars>1000+
// Copyright (c) Facebook, Inc. and its affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "esp/geo/Geo.h"
#include <Magnum/Math/FunctionsBatch.h>
#include <Magnum/Primitives/Circle.h>
#include <Magnum/Trade/MeshData.h>
#include <cmath>
#include <numeric>
namespace Mn = Magnum;
namespace Cr = Corrade;
namespace esp {
namespace geo {
std::vector<vec2f> convexHull2D(const std::vector<vec2f>& points) {
CORRADE_INTERNAL_ASSERT(points.size() > 2);
auto cross = [](const vec2f& o, const vec2f& a, const vec2f& b) {
return (a(0) - o(0)) * (b(1) - o(1)) - (a(1) - o(1)) * (b(0) - o(0));
};
// Sort indices of points lexicographically
std::vector<size_t> idx(points.size());
std::iota(idx.begin(), idx.end(), 0);
std::sort(
idx.begin(), idx.end(), [&points](const size_t& a, const size_t& b) {
return points[a](0) < points[b](0) ||
(points[a](0) == points[b](0) && points[a](1) < points[b](1));
});
std::vector<size_t> hullIdx(2 * idx.size());
// Build lower hull
int k = 0;
for (size_t i = 0; i < idx.size(); ++i) {
while (k >= 2 && cross(points[hullIdx[k - 2]], points[hullIdx[k - 1]],
points[idx[i]]) <= 0) {
k--;
}
hullIdx[k++] = idx[i];
}
// Build upper hull
for (size_t i = idx.size() - 1, t = k + 1; i > 0; i--) {
while (k >= t && cross(points[hullIdx[k - 2]], points[hullIdx[k - 1]],
points[idx[i - 1]]) <= 0) {
k--;
}
hullIdx[k++] = idx[i - 1];
}
hullIdx.resize(k - 1);
std::vector<vec2f> hull;
hull.reserve(hullIdx.size());
for (auto& ix : hullIdx) {
hull.emplace_back(points[ix]);
}
return hull;
}
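// Note: the routine above is Andrew's monotone-chain convex hull; after the O(n log n) lexicographic
// sort, the lower and upper hulls are each built in linear time using the cross-product orientation test.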
/**
* Assume the aabb is expressed as the center 'c' and the extent 'e'.
* Each corner X is nothing but a combination of
* (c_x +/- e_x, c_y +/- e_y, c_z +/- e_z)
*
* Denote y = (+/- e_x, +/- e_y, +/- e_z)
*
* The corner is transformed by:
* x = R * x0 + t,
* where x_0, x are the coordinates before and after transformation, t is the
* translation.
*
* x = R * (c0 + y0) + t = (Rc0 + t) + Ry0 eq(1)
*
* Our Goal is to find the x_max and x_min after the transformation.
*
* First, determining the x_max:
*
* In eq(1), Rc0 + t is a constant, which means max{x} iff max{Ry0}
*
* R looks like:
* [R0, R1, R2]
* [R3, R4, R5]
* [R6, R7, R8]
*
* We focus on the 1st entry (the 'x' entry) of Ry0:
* y_x =<(R0, R1, R2), (+/- e_x, +/- e_y, +/- e_z)>
* =<(+/- R0, +/- R1, +/- R2), (e_x, e_y, e_z)>
*
* Now, note that e_x, e_y, e_z are all positive values for any non-degenerate
* aabb.
*
* So y_x reaches MAX when +/- R0, +/- R1, +/- R2 are all >=0
* that means max{y_x} = <(|R0|, |R1|, |R2|), (e_x, e_y, e_z)>
* (|R0| means the absolute value of R0)
*
* and likewise for y_y, y_z
*
* The derivation for x_min is similar since the same logic applies.
*/
Mn::Range3D getTransformedBB(const Mn::Range3D& range,
const Mn::Matrix4& xform) {
// compute the absolute value of the rotationScaling part of the original
// transformation matrix
auto absRotationScaling = Mn::Matrix3x3::fromVector(
Mn::Math::abs(xform.rotationScaling().toVector()));
const Mn::Vector3 center = range.center();
const Mn::Vector3 extent = range.size() / 2.0;
// compute Rc0 + t
Mn::Vector3 newCenter = xform.transformPoint(center);
// compute max{Ry0}
Mn::Vector3 newExtent = absRotationScaling * extent;
return Mn::Range3D::fromCenter(newCenter, newExtent);
}
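// Worked numeric sketch of the derivation above (made-up values, not taken from the original docs):
// with half-extent e = (1, 2, 3) and |R| having first row (0.8, 0.6, 0.0), the transformed x half-extent
// is 0.8*1 + 0.6*2 + 0.0*3 = 2.0, i.e. newExtent = abs(R) * e exactly as computed in getTransformedBB().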
float calcWeightedDistance(const Mn::Vector3& a,
const Mn::Vector3& b,
float alpha) {
// embed square root from distance calc
alpha *= .5;
// calc t value based on L2 norm of displacement between a and b raised to
// passed alpha power.
Mn::Vector3 d = b - a;
float squareDist = dot(d, d);
return Mn::Math::pow(squareDist, alpha);
}
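// Note on the "alpha *= .5" trick above (illustrative; assumes the centripetal default alpha = 0.5):
// the knot spacing should be ||b - a||^alpha = (squareDist^0.5)^alpha, so halving alpha and applying it
// directly to squareDist yields the same value without an explicit square root, e.g. squareDist = 16 and
// alpha = 0.5 give 16^0.25 = 2 = sqrt(16)^0.5.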
void buildCatmullRomTraj4Points(const std::vector<Mn::Vector3>& pts,
const std::vector<float>& ptKnotVals,
std::vector<Mn::Vector3>& trajectory,
int stIdx,
int numInterp) {
// t values are based on distances between sequential points and type of
// spline
float t0 = 0.0f;
float t1 = ptKnotVals[stIdx];
float t2 = ptKnotVals[stIdx + 1] + t1;
float t3 = ptKnotVals[stIdx + 2] + t2;
float incr = (t2 - t1) / (1.0f * numInterp);
for (int i = 0; i < numInterp; ++i) {
float t = t1 + i * incr;
// don't allow float error to cause t to go past 3rd interpolated point in
// spline
t = (t > t2) ? t2 : t;
Mn::Vector3 A0 = interp2Points(pts[stIdx], t0, pts[stIdx + 1], t1, t);
Mn::Vector3 A1 = interp2Points(pts[stIdx + 1], t1, pts[stIdx + 2], t2, t);
Mn::Vector3 A2 = interp2Points(pts[stIdx + 2], t2, pts[stIdx + 3], t3, t);
Mn::Vector3 B0 = interp2Points(A0, t0, A1, t2, t);
Mn::Vector3 B1 = interp2Points(A1, t1, A2, t3, t);
// resultant point will be between t1 and t2 in this 4-point spline
trajectory.emplace_back(interp2Points(B0, t1, B1, t2, t));
}
} // buildCatmullRomTraj4Points
std::vector<Mn::Vector3> buildCatmullRomTrajOfPoints(
const std::vector<Mn::Vector3>& pts,
int numInterp,
float alpha) {
// enforce alpha limits
alpha = clamp(alpha, 0.0f, 1.0f);
// points in trajectory
std::vector<Mn::Vector3> trajectory;
std::vector<Mn::Vector3> tmpPoints;
std::vector<float> ptKnotVals;
// build padded array of points to use to synthesize centripetal catmul-rom
// trajectory by adding "ghost" point so we start drawing from initial point
// in trajectory.
tmpPoints.emplace_back(pts[0] - (pts[1] - pts[0]));
ptKnotVals.emplace_back(calcWeightedDistance(tmpPoints[0], pts[0], alpha));
for (int i = 0; i < pts.size(); ++i) {
tmpPoints.emplace_back(pts[i]);
ptKnotVals.emplace_back(
calcWeightedDistance(tmpPoints[i], tmpPoints[i + 1], alpha));
}
// add final ghost point in trajectory
int lastIdx = pts.size() - 1;
tmpPoints.emplace_back(pts[lastIdx] + (pts[lastIdx] - pts[lastIdx - 1]));
ptKnotVals.emplace_back(calcWeightedDistance(
tmpPoints[tmpPoints.size() - 2], tmpPoints[tmpPoints.size() - 1], alpha));
for (int i = 0; i < tmpPoints.size() - 3; ++i) {
buildCatmullRomTraj4Points(tmpPoints, ptKnotVals, trajectory, i, numInterp);
}
return trajectory;
} // buildCatmullRomTrajOfPoints
std::vector<float> getPointDistsAlongTrajectory(
const std::vector<Mn::Vector3>& pts) {
std::vector<float> dists;
dists.emplace_back(0.0f);
for (int i = 1; i < pts.size(); ++i) {
dists.emplace_back(dists[i - 1] +
calcWeightedDistance(pts[i - 1], pts[i], 1.0f));
}
return dists;
} // getPointDistsAlongTrajectory
Mn::Trade::MeshData buildTrajectoryTubeSolid(
const std::vector<Mn::Vector3>& pts,
int numSegments,
float radius,
bool smooth,
int numInterp) {
// 1. Build smoothed trajectory through passed points if requested
// points in trajectory
// A centripetal CR spline (alpha == .5) will not have cusps, while remaining
// true to underlying key point trajectory.
float alpha = .5;
std::vector<Mn::Vector3> trajectory =
smooth ? buildCatmullRomTrajOfPoints(pts, numInterp, alpha) : pts;
// size of trajectory
const Mn::UnsignedInt trajSize = trajectory.size();
// 2. Build mesh vertex points around each trajectory point at appropriate
// distance (radius). For each point in trajectory, add a wireframe circle
// centered at that point, appropriately oriented based on tangents
Cr::Containers::Array<Magnum::Vector3> circleVerts =
Mn::Primitives::circle3DWireframe(numSegments).positions3DAsArray();
// normalized verts
Cr::Containers::Array<Magnum::Vector3> circleNormVerts{
Cr::NoInit, sizeof(Magnum::Vector3) * numSegments};
// transform points to be on circle of given radius, and make copy to
// normalize points
for (int i = 0; i < numSegments; ++i) {
circleVerts[i] *= radius;
circleNormVerts[i] = circleVerts[i].normalized();
}
// # of vertices in resultant tube == # circle verts * # points in trajectory
const Mn::UnsignedInt vertexCount = numSegments * trajSize + 2;
// a function-local struct representing a vertex
struct Vertex {
Mn::Vector3 position;
Mn::Vector3 normal;
};
// Vertex data storage
Cr::Containers::Array<char> vertexData{Cr::NoInit,
sizeof(Vertex) * vertexCount};
// Cast memory to be a strided array so it can be accessed via slices.
Cr::Containers::StridedArrayView1D<Vertex> vertices =
Cr::Containers::arrayCast<Vertex>(vertexData);
// Position and normal views of vertex array
Cr::Containers::StridedArrayView1D<Mn::Vector3> positions =
vertices.slice(&Vertex::position);
Cr::Containers::StridedArrayView1D<Mn::Vector3> normals =
vertices.slice(&Vertex::normal);
Mn::UnsignedInt circlePtIDX = 0;
Mn::Vector3 tangent = trajectory[1] - trajectory[0];
// get the orientation matrix assuming y-up preference
Mn::Matrix4 tangentOrientation = Mn::Matrix4::lookAt(
trajectory[0], trajectory[0] + tangent, Mn::Vector3{0, 1.0, 0});
for (int i = 0; i < numSegments; ++i) {
// build vertex (circleVerts[i] is at radius)
positions[circlePtIDX] = tangentOrientation.transformPoint(circleVerts[i]);
// pre-rotated normal for circle is normalized point
normals[circlePtIDX] =
tangentOrientation.transformVector(circleNormVerts[i]);
++circlePtIDX;
}
// add cap vert at the end of the list
// build vertex (circleVerts[i] is at radius)
positions[vertexCount - 2] = trajectory[0];
// pre-rotated normal for circle is normalized point
normals[vertexCount - 2] =
tangentOrientation.transformVector({0.0f, 0.0f, -1.0f});
for (Mn::UnsignedInt vertIx = 1; vertIx < trajSize - 1; ++vertIx) {
const Mn::Vector3& vert = trajectory[vertIx];
Mn::Vector3 pTangent = vert - trajectory[vertIx - 1];
Mn::Vector3 nTangent = trajectory[vertIx + 1] - vert;
tangent = (pTangent + nTangent) / 2.0;
// get the orientation matrix assuming y-up preference
tangentOrientation =
Mn::Matrix4::lookAt(vert, vert + tangent, Mn::Vector3{0, 1.0, 0});
for (int i = 0; i < numSegments; ++i) {
// build vertex (circleVerts[i] is at radius)
positions[circlePtIDX] =
tangentOrientation.transformPoint(circleVerts[i]);
// pre-rotated normal for circle is normalized point
normals[circlePtIDX] =
tangentOrientation.transformVector(circleNormVerts[i]);
++circlePtIDX;
}
}
int idx = trajSize - 1;
tangent = trajectory[idx] - trajectory[idx - 1];
// get the orientation matrix assuming y-up preference
tangentOrientation = Mn::Matrix4::lookAt(
trajectory[idx], trajectory[idx] + tangent, Mn::Vector3{0, 1.0, 0});
for (int i = 0; i < numSegments; ++i) {
// build vertex (circleVerts[i] is at radius)
positions[circlePtIDX] = tangentOrientation.transformPoint(circleVerts[i]);
// pre-rotated normal for circle is normalized point
normals[circlePtIDX] =
tangentOrientation.transformVector(circleNormVerts[i]);
++circlePtIDX;
}
// add cap verts
// build vertex (circleVerts[i] is at radius)
positions[vertexCount - 1] = trajectory[idx];
// pre-rotated normal for circle is normalized point
normals[vertexCount - 1] =
tangentOrientation.transformVector({0.0f, 0.0f, 1.0f});
// 3. Create polys between all points
Cr::Containers::Array<char> indexData{
Cr::NoInit, 6 * numSegments * trajSize * sizeof(Mn::UnsignedInt)};
Cr::Containers::ArrayView<Mn::UnsignedInt> indices =
Cr::Containers::arrayCast<Mn::UnsignedInt>(indexData);
// create triangle indices for each tube pair correspondance - cw winding
/*
+n---+n+1
| \ F2|
| \ |
|F1 \ |
+0---+1
F1 = [+0, +n, +1]
F2 = [+1, +n, +n+1]
*/
int iListIDX = 0;
for (Mn::UnsignedInt vIdx = 0; vIdx < trajSize - 1;
++vIdx) { // skip last circle (adding forward)
int vIdxNumSeg = vIdx * numSegments;
for (Mn::UnsignedInt circleIx = 0; circleIx < numSegments; ++circleIx) {
Mn::UnsignedInt ix = circleIx + vIdxNumSeg; //+0
Mn::UnsignedInt ixNext = ix + numSegments; //+n
Mn::UnsignedInt ixPlus = ix + 1; //+1
Mn::UnsignedInt ixNextPlus = ixNext + 1; //+n+1
if (circleIx == numSegments - 1) {
// last vert in a circle wraps to relative 0
ixPlus = vIdxNumSeg;
ixNextPlus = vIdxNumSeg + numSegments;
}
// F1
indices[iListIDX++] = ix;
indices[iListIDX++] = ixNext;
indices[iListIDX++] = ixPlus;
// F2
indices[iListIDX++] = ixPlus;
indices[iListIDX++] = ixNext;
indices[iListIDX++] = ixNextPlus;
}
}
int offset = numSegments * (trajSize - 1);
// end caps - verts added at the end of the vertices array
for (Mn::UnsignedInt circleIx = 0; circleIx < numSegments; ++circleIx) {
// first endcap
Mn::UnsignedInt ix = circleIx;
Mn::UnsignedInt ixPlus = (ix + 1) % numSegments; //+1
indices[iListIDX++] = (ix);
indices[iListIDX++] = (ixPlus);
indices[iListIDX++] = (vertexCount - 2);
// last endcap
ix += offset;
ixPlus += offset; //+1
indices[iListIDX++] = (ixPlus);
indices[iListIDX++] = (ix);
indices[iListIDX++] = (vertexCount - 1);
}
// Finally, make the MeshData. The indices have to be constructed first
// because function argument evaluation order is not guaranteed and so you
// might end up with the move happening before the MeshIndexData construction,
// which would result in 0 indices)
// Building the mesh this way obviates the need for an interleaving step
Mn::Trade::MeshData meshData{
Mn::MeshPrimitive::Triangles,
std::move(indexData),
Mn::Trade::MeshIndexData{indices},
std::move(vertexData),
{Mn::Trade::MeshAttributeData{Mn::Trade::MeshAttribute::Position,
positions},
Mn::Trade::MeshAttributeData{Mn::Trade::MeshAttribute::Normal, normals}},
static_cast<Mn::UnsignedInt>(positions.size())};
return meshData;
} // ResourceManager::trajectoryTubeSolid
} // namespace geo
} // namespace esp
| 6,008 |
635 | <filename>tests/test_exponential_moving_average.py
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import exponential_moving_average
class TestExponentialMovingAverage(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.ema_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
807.02732709950601, 809.36943856347534, 811.57440210146626,
812.18585332862858, 811.51167137144193, 813.03754508743032,
814.78320748059286, 815.07968870069783, 812.70892143025173,
811.64434152748379, 806.29546851721182, 801.00157100290141,
792.08458029483256, 786.49900395985253, 778.3013324315848,
772.33519779659707, 772.50392182231633, 777.2539741629422,
780.11334548733646, 780.22474319767912, 780.51104642045016,
782.69806614130005, 783.68893299615797, 780.89411393397643,
782.41484297812269, 781.70954696934052, 779.67022014427982,
770.7224102172039, 770.44497569199416, 773.44687093232972,
783.48082470791189, 793.02367697796615, 796.77266839175115,
793.51471418489757, 790.7359068454482, 790.42996530228174,
794.72516192268495, 798.969814945503, 805.61978887320629,
810.55747667215576, 815.60169077864032, 821.05718458401952,
826.57636791343214, 826.47700776287923, 825.18272582921668,
826.1538565435585, 824.79463420371678, 818.71476966204045,
813.31937877362213, 809.80528718732853, 809.93518760291715,
809.06422194777713, 810.20271289108439, 807.22227750333252,
805.97225868423118, 804.49261957970691, 803.61321708617584,
802.28731200501852, 802.35140888418425, 802.51831490629661,
804.97850741002128, 807.03748451344779, 805.77779150003926,
809.0392597820121, 811.53403022818156, 809.78031561201294,
806.4732584489924, 802.63537755822165, 801.09762879322523,
800.53466262840118, 796.54081627852281, 793.59261722731912,
795.04414823962998, 792.86631498470945, 795.92903689328011,
799.67082509997647, 802.82468027130881, 801.47996432212028,
798.27529600878233, 795.92801595702986, 794.30503979455818,
794.20536030737867, 793.27622206539638, 792.46785952324944,
792.88759409550698, 794.24034795734349, 795.23463302752293,
796.66375656708226, 798.81968948482722, 801.32334431114248,
801.6173631694503, 803.26812161844305, 804.7906061318904,
806.13884282129698, 807.06176840743376, 807.27140300321491,
806.86492413549763, 806.92596173449397, 803.73258056927784,
801.95149062965572, 801.06195973496426, 800.74094193523092,
797.44717086175808, 786.61856680780988, 778.0323647377088,
770.40297184976089, 764.85806555320335, 761.44140711989326,
757.8164798478789, 757.22274386418894, 755.75882674664786,
754.83301380067439, 748.38859307613905, 743.76443287853829,
738.5922990668862, 735.77938465459113, 732.1812553908884,
726.51673057319852, 719.77341547086951, 715.51271936015075,
711.40166686269902, 710.0985836665883]
self.ema_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, 809.87511039506569, 811.14736952710587,
811.09599978863753, 812.48872650228316, 813.96252793740246,
814.52771939857178, 812.99372919527764, 812.11173938413049,
807.65864531118575, 803.33854386415715, 796.14670001705917,
790.87683374568701, 783.17048528992075, 777.2309249266134,
775.8304605546067, 778.16722678713973, 779.53886446459842,
779.29852809941121, 779.3290882690593, 780.82226260879258,
782.40511944985656, 781.37506207512934, 782.79161899065286,
781.84175391610734, 779.99165577148403, 773.11532262769742,
772.5627959085798, 774.11810917839, 781.82624061850902,
789.76443770331605, 793.03543700431749, 790.42160267604254,
788.83391977290876, 789.95146636171103, 794.56126880482566,
798.98732308368244, 804.03906079193348, 807.18156914182555,
811.18641356239164, 816.67544684018435, 822.75207855925862,
824.26737563837924, 824.23623220531567, 825.46107894550505,
824.64493486977085, 820.32714212894632, 816.23089343681863,
812.68986623703938, 811.66180366569108, 810.69905864129316,
811.21404131385952, 807.97335057261012, 806.25334204783451,
804.9442751687684, 804.40335283483284, 803.38213646078316,
803.26784477033391, 802.89019773552445, 804.34226090018967,
806.1060618353348, 805.3232726007675, 807.93655251821804,
810.16951744598987, 809.18718395619112, 806.94963093102501,
804.13170236672374, 802.34700327880125, 801.77234500905536,
798.83962250602974, 795.68916879227265, 795.79629463825916,
793.6017666981744, 795.75985396621218, 798.91062051364315,
801.28026127195176, 800.15038658094875, 798.2381561453916,
796.41295006855876, 795.17742159363036, 795.57243103091264,
794.7191098671957, 793.38581236159519, 792.9379151620949,
793.73986831901368, 794.64393786425182, 796.09313991929616,
797.94457842299983, 799.93565469804128, 800.48826015022848,
802.20794357854959, 803.74611980498867, 805.13480928074523,
806.27694022679111, 806.81333826905916, 806.54670211941868,
806.68376546111369, 804.4519883831091, 802.98075187919505,
802.10690326463885, 801.59812612118697, 798.72924088405341,
789.98970314243491, 782.27661908667801, 775.11069680263267,
769.83646620345746, 766.12849050478098, 761.95371219561662,
759.36356471608758, 756.73721437200663, 755.51683595158204,
750.19245419978381, 746.24332981080386, 741.55464579345698,
738.62486413897159, 735.15102596486452, 729.94038660079752,
723.31451227190007, 718.60702875656625, 714.50252417525724,
712.67957960319632]
self.ema_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, 809.99039688880748, 811.67453684590134,
813.22847318315848, 813.91651022862595, 812.76527617212628,
812.2090913585198, 808.70134161204544, 804.98299701868734,
798.64424236643993, 793.96274569120294, 787.31717043461151,
781.68851160462771, 779.44614091252322, 780.5318424369849,
780.86352927685948, 779.916926582359, 779.29360447039278,
780.14843117919452, 781.23758237974334, 780.21630466674264,
781.90974078036231, 782.00080325694739, 780.73668949870898,
774.7108752938866, 773.77607304761852, 774.96296482108244,
781.28236464435429, 787.62561367683224, 790.73113345838124,
789.12487913780558, 787.75188722864561, 788.11169085595952,
792.12574089557836, 796.95795325533288, 802.41747427530015,
806.17897860889479, 809.53991887309382, 813.46420064097413,
818.46624839172728, 820.69639789578116, 821.9798497892391,
824.00093081866214, 823.99648805313529, 820.79071836042056,
817.42451115753647, 814.6235183649203, 813.81299847785272,
812.50558728853628, 812.19911448449648, 809.45082767114388,
807.76868077944778, 805.8495954837955, 804.76525511050511,
803.78551862820905, 803.82017582125775, 803.56493570344242,
804.69756953340948, 805.82713113326975, 804.92416936107236,
807.15125710108703, 809.11445138068439, 808.47912819297937,
806.81247219217903, 804.59475153029712, 803.21828193454007,
802.81696057198678, 800.01677599780612, 797.34129871584855,
797.50200201923406, 795.11389622132663, 796.14844240351033,
798.38503730579043, 800.36952968607989, 799.77504377643345,
798.06678499491068, 796.25147583824082, 795.50516223217448,
795.76289514321365, 795.14241786595369, 794.52342754832705,
794.09461266593451, 794.22640644577871, 794.40901080009826,
795.44041952057285, 797.08993526220502, 799.04415788525625,
799.69607710103776, 801.12130532294645, 802.58978111696047,
804.08704282949395, 805.30804406629886, 806.01045678190314,
806.1023660494991, 806.45841746708663, 804.62885083834897,
803.40733106802713, 802.7794198682094, 802.30322521664471,
799.86629282524939, 792.48353316255805, 785.62198706490324,
779.12907253342996, 773.85291607745944, 769.85775507429219,
765.83935713439735, 763.06526953038349, 759.99076978541211,
757.4080030532225, 751.87522809190875, 748.10419033342032,
743.80052183588418, 741.03779678694593, 737.66565808498058,
732.87820758604823, 726.8889307547239, 722.27241397434011,
717.7418681263797, 715.16688890795626]
def test_exponential_moving_average_period_6(self):
period = 6
ema = exponential_moving_average.exponential_moving_average(self.data, period)
np.testing.assert_array_equal(ema, self.ema_period_6_expected)
def test_exponential_moving_average_period_8(self):
period = 8
ema = exponential_moving_average.exponential_moving_average(self.data, period)
np.testing.assert_array_equal(ema, self.ema_period_8_expected)
def test_exponential_moving_average_period_10(self):
period = 10
ema = exponential_moving_average.exponential_moving_average(self.data, period)
np.testing.assert_array_equal(ema, self.ema_period_10_expected)
def test_exponential_moving_average_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
exponential_moving_average.exponential_moving_average(self.data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
| 5,234 |
1,414 | <filename>waterbox/bsnescore/bsnes/nall/hash/crc64.hpp
#pragma once
#include <nall/hash/hash.hpp>
namespace nall::Hash {
struct CRC64 : Hash {
using Hash::input;
CRC64(array_view<uint8_t> buffer = {}) {
reset();
input(buffer);
}
auto reset() -> void override {
checksum = ~0;
}
auto input(uint8_t value) -> void override {
checksum = (checksum >> 8) ^ table(checksum ^ value);
}
auto output() const -> vector<uint8_t> {
vector<uint8_t> result;
for(auto n : reverse(range(8))) result.append(~checksum >> n * 8);
return result;
}
auto value() const -> uint64_t {
return ~checksum;
}
private:
static auto table(uint8_t index) -> uint64_t {
static uint64_t table[256] = {0};
static bool initialized = false;
if(!initialized) {
initialized = true;
for(auto index : range(256)) {
uint64_t crc = index;
for(auto bit : range(8)) {
crc = (crc >> 1) ^ (crc & 1 ? 0xc96c'5795'd787'0f42 : 0);
}
table[index] = crc;
}
}
return table[index];
}
uint64_t checksum = 0;
};
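// Minimal usage sketch (assumes nall's array_view can wrap a raw pointer plus size; the expected value is
// the CRC-64/XZ check value for this reflected polynomial/init/xorout combination, quoted from memory):
//   Hash::CRC64 crc({reinterpret_cast<const uint8_t*>("123456789"), 9});
//   uint64_t v = crc.value();   // expected 0x995dc9bbdf1939fa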
}
| 483 |
1,253 | // Create dynamic matrices
// Implements basic operations
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
typedef struct {
float *data;
int rows, cols;
} matrix_t;
// Allocation and destruction
matrix_t *create_matrix(float numbers[], int rows, int cols);
void destroy_matrix(matrix_t *matrix);
// Get and set
float get_at_matrix(matrix_t *matrix, int row, int col);
void set_at_matrix(matrix_t *matrix, float value, int row, int col);
// In place operations
void scale_matrix(matrix_t *matrix, float scalar);
void add_matrix(matrix_t *lhs, matrix_t *rhs);
void subtract_matrix(matrix_t *lhs, matrix_t *rhs);
// New matrix
matrix_t *multiply_matrix(matrix_t *lhs, matrix_t *rhs);
// Display
void print_matrix(matrix_t *matrix, int spacing);
int
main(int argc, char *argv[]) {
float numbers[] = {5, 3, 2, 4};
float numbers_2[] = {3, 2, 1, 0};
matrix_t *test = create_matrix(numbers, 2, 2);
matrix_t *b = create_matrix(numbers_2, 2, 2);
printf("Initial matrix: \n");
print_matrix(test, 1);
printf("\nScale matrix by 3: \n");
scale_matrix(test, 3);
print_matrix(test, 1);
printf("\nAdd \n");
print_matrix(b, 1);
printf("to the original matrix:\n");
add_matrix(test, b);
print_matrix(test, 1);
printf("\nMultiply these matrices:\n");
matrix_t *product = multiply_matrix(test, b);
print_matrix(product, 1);
destroy_matrix(test);
destroy_matrix(b);
destroy_matrix(product);
return 0;
}
matrix_t *
create_matrix(float numbers[], int rows, int cols) {
matrix_t *matrix = (matrix_t *)malloc(sizeof(matrix_t));
assert(matrix);
matrix->data = (float *)calloc(rows * cols, sizeof(float));
assert(matrix->data);
if(numbers) {
int i;
for(i = 0; i < rows * cols; i++) {
matrix->data[i] = numbers[i];
}
}
matrix->rows = rows;
matrix->cols = cols;
return matrix;
}
void
destroy_matrix(matrix_t *matrix) {
free(matrix->data);
free(matrix);
}
float
get_at_matrix(matrix_t *matrix, int row, int col) {
assert(row >= 0 && row < matrix->rows);
assert(col >= 0 && col < matrix->cols);
return matrix->data[(row * matrix->cols) + col];
}
void
set_at_matrix(matrix_t *matrix, float value, int row, int col) {
assert(row >= 0 && row < matrix->rows);
assert(col >= 0 && col < matrix->cols);
matrix->data[(row * matrix->cols) + col] = value;
}
void
scale_matrix(matrix_t *matrix, float scalar) {
int i;
for(i = 0; i < matrix->rows * matrix->cols; i++) {
matrix->data[i] *= scalar;
}
}
void
add_matrix(matrix_t *lhs, matrix_t *rhs) {
assert(lhs->rows == rhs->rows && lhs->cols == rhs->cols);
int i;
for(i = 0; i < lhs->rows * lhs->cols; i++) {
lhs->data[i] += rhs->data[i];
}
}
void
subtract_matrix(matrix_t *lhs, matrix_t *rhs) {
assert(lhs->rows == rhs->rows && lhs->cols == rhs->cols);
int i;
for(i = 0; i < lhs->rows * lhs->cols; i++) {
lhs->data[i] -= rhs->data[i];
}
}
matrix_t *
multiply_matrix(matrix_t *lhs, matrix_t *rhs) {
assert(lhs->cols == rhs->rows);
// Unoptimized implementation (naive dotproduct solution)
matrix_t *product = create_matrix(NULL, lhs->rows, rhs->cols);
assert(product);
int i, j, c;
for(i = 0; i < lhs->rows; ++i) {
for(j = 0; j < rhs->cols; ++j) {
float dot = 0.0;
// Dot product of lhs row and rhs column
for(c = 0; c < lhs->cols; ++c) {
dot += get_at_matrix(lhs, i, c) * get_at_matrix(rhs, c, j);
}
set_at_matrix(product, dot, i, j);
}
}
return product;
}
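/*
 * Added note: multiply_matrix above uses the straightforward i-j-c loop
 * order, which reads rhs one column at a time. A minimal alternative sketch
 * (same result, usually friendlier to the cache for larger matrices) reorders
 * the loops to i-c-j so both operands are traversed row by row. This helper
 * is illustrative and not part of the original interface.
 */
matrix_t *
multiply_matrix_rowwise(matrix_t *lhs, matrix_t *rhs) {
    assert(lhs->cols == rhs->rows);
    /* create_matrix(NULL, ...) zero-fills, so the products can be accumulated in place */
    matrix_t *product = create_matrix(NULL, lhs->rows, rhs->cols);
    int i, j, c;
    for(i = 0; i < lhs->rows; ++i) {
        for(c = 0; c < lhs->cols; ++c) {
            float a = get_at_matrix(lhs, i, c);
            for(j = 0; j < rhs->cols; ++j) {
                float cur = get_at_matrix(product, i, j);
                set_at_matrix(product, cur + a * get_at_matrix(rhs, c, j), i, j);
            }
        }
    }
    return product;
}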
void
print_matrix(matrix_t *matrix, int spacing) {
int i, j, s;
for(i = 0; i < matrix->rows; i++) {
for(j = 0; j < matrix->cols; j++) {
printf("%.2f", get_at_matrix(matrix, i, j));
for(s = 0; s < spacing; s++) {
putchar(' ');
}
}
putchar('\n');
}
} | 1,896 |
320 | /*
* Copyright (c) 2019- jMonkeyEngine
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of 'jMonkeyEngine' nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.jme3.gde.core.sceneexplorer.nodes.actions.impl;
import com.jme3.gde.core.assets.ProjectAssetManager;
import com.jme3.material.Material;
import com.jme3.math.ColorRGBA;
import com.jme3.scene.Geometry;
import com.jme3.scene.Mesh;
/**
 * This is the base panel used by every "New Geometry/Primitive" dialog;
 * it provides the shared form elements such as the mesh name, material
 * definition and default color selection.
*
* @author MeFisto94
*/
public class AbstractNewGeometryPanel extends javax.swing.JPanel {
/**
* Creates new form AbstractNewGeometryPanel
*/
public AbstractNewGeometryPanel() {
initComponents();
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
inGeomName = new javax.swing.JTextField();
lblGeomName = new javax.swing.JLabel();
lblMatDef = new javax.swing.JLabel();
inMatDef = new javax.swing.JComboBox<>();
lblDefaultColor = new javax.swing.JLabel();
inRandomColor = new javax.swing.JCheckBox();
inGeomName.setText(org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.inGeomName.text")); // NOI18N
org.openide.awt.Mnemonics.setLocalizedText(lblGeomName, org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.lblGeomName.text")); // NOI18N
org.openide.awt.Mnemonics.setLocalizedText(lblMatDef, org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.text")); // NOI18N
lblMatDef.setToolTipText(org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.toolTipText")); // NOI18N
lblMatDef.setName(""); // NOI18N
inMatDef.setModel(new javax.swing.DefaultComboBoxModel<>(new String[] { "Common/MatDefs/Misc/Unshaded.j3md", "Common/MatDefs/Light/Lighting.j3md", "Common/MatDefs/Light/PBRLighting.j3md" }));
org.openide.awt.Mnemonics.setLocalizedText(lblDefaultColor, org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.lblDefaultColor.text")); // NOI18N
org.openide.awt.Mnemonics.setLocalizedText(inRandomColor, org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.inRandomColor.text")); // NOI18N
inRandomColor.setToolTipText(org.openide.util.NbBundle.getMessage(AbstractNewGeometryPanel.class, "AbstractNewGeometryPanel.inRandomColor.toolTipText")); // NOI18N
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(lblGeomName)
.addComponent(lblMatDef))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addComponent(inMatDef, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(0, 0, Short.MAX_VALUE))
.addComponent(inGeomName)))
.addGroup(layout.createSequentialGroup()
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(lblDefaultColor)
.addComponent(inRandomColor))
.addGap(0, 0, Short.MAX_VALUE)))
.addContainerGap())
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addGap(8, 8, 8)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(lblGeomName)
.addComponent(inGeomName, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(lblMatDef)
.addComponent(inMatDef, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(lblDefaultColor)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, 18, Short.MAX_VALUE)
.addComponent(inRandomColor)
.addContainerGap())
);
}// </editor-fold>//GEN-END:initComponents
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JTextField inGeomName;
private javax.swing.JComboBox<String> inMatDef;
private javax.swing.JCheckBox inRandomColor;
private javax.swing.JLabel lblDefaultColor;
private javax.swing.JLabel lblGeomName;
private javax.swing.JLabel lblMatDef;
// End of variables declaration//GEN-END:variables
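    /**
     * Builds a Geometry from the given Mesh using the name, material
     * definition and color options currently entered in this panel.
     *
     * @param assetManager the project's asset manager, used to load the material definition
     * @param m the mesh to wrap in the new Geometry
     * @return the named Geometry with its material applied
     */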
public Geometry handleGeometry(ProjectAssetManager assetManager, Mesh m) {
Geometry geom = new Geometry(inGeomName.getText(), m);
ColorRGBA col;
if (inRandomColor.isSelected()) {
col = ColorRGBA.randomColor();
} else {
// @TODO: Add a Color Picker Dialog and preserve the state
col = ColorRGBA.Red;
}
Material mat = new Material(assetManager, inMatDef.getSelectedItem().toString());
switch (inMatDef.getSelectedItem().toString()) {
case "Common/MatDefs/Misc/Unshaded.j3md":
mat.setColor("Color", col);
break;
case "Common/MatDefs/Light/Lighting.j3md":
mat.setColor("Diffuse", col);
break;
case "Common/MatDefs/Light/PBRLighting.j3md":
mat.setColor("BaseColor", col);
break;
}
geom.setMaterial(mat);
return geom;
}
}
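/*
 * Added usage sketch (not part of the original class): a concrete
 * "New Geometry" panel would typically build its Mesh and then delegate
 * naming, material selection and coloring to handleGeometry(). The subclass
 * and method names below are illustrative assumptions, not SDK API.
 *
 * class NewBoxPanel extends AbstractNewGeometryPanel {
 *     Geometry createBox(ProjectAssetManager assetManager) {
 *         Mesh mesh = new com.jme3.scene.shape.Box(1f, 1f, 1f);
 *         return handleGeometry(assetManager, mesh);
 *     }
 * }
 */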
| 3,680 |
370 | <gh_stars>100-1000
package com.ifttt.sparklemotiondemo;
import android.app.Activity;
import android.os.Bundle;
import android.support.v4.view.ViewPager;
import com.ifttt.sparklemotion.Page;
import com.ifttt.sparklemotion.SparkleMotion;
import com.ifttt.sparklemotion.animations.RotationAnimation;
/**
* Demo Activity for {@link RotationAnimation}.
*/
public class RotationViewPagerActivity extends Activity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.single_view_pager_layout);
ViewPager viewPager =
(ViewPager) findViewById(R.id.view_pager);
viewPager.setAdapter(new PagerAdapter());
RotationAnimation rotationAnimation = new RotationAnimation(Page.allPages(), 0, 360);
SparkleMotion.with(viewPager) //
.animate(rotationAnimation) //
.on(R.id.pic_img_view);
}
}
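/*
 * Added note: Page.allPages() applies the rotation to every page of the
 * ViewPager. SparkleMotion can also target a subset of pages; a minimal
 * sketch, assuming this version of the library exposes the other Page
 * factory methods (e.g. Page.singlePage(int)):
 *
 *     RotationAnimation firstPageOnly =
 *             new RotationAnimation(Page.singlePage(0), 0, 360);
 */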
| 379 |